From 8d7d79d54b9160425fc5050b3484bec40dd3b44e Mon Sep 17 00:00:00 2001 From: Tim Jaeryang Baek Date: Tue, 7 Oct 2025 16:20:27 -0500 Subject: [PATCH] 0.6.33 (#18118) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: improve ollama model management experience This commit introduces several improvements to the Ollama model management modal: - Adds a cancel button to the model pulling operation, using the existing 'x' button pattern. - Adds a cancel button to the "Update All" models operation, allowing the user to cancel the update for the currently processing model. - Cleans up toast notifications when updating all models. A single toast is now shown at the beginning and a summary toast at the end, preventing notification spam. - Refactors the `ManageOllama.svelte` component to support these new cancellation features. - Adds tooltips to all buttons in the modal to improve clarity. - Disables buttons when their corresponding input fields are empty to prevent accidental clicks. 
* fix * i18n: improve Chinese translation * fix: handle non‑UTF8 chars in third‑party responses without error * German translation of new strings in i18n * log web search queries only with level 'debug' instead of 'info' * Tool calls now only include text and don't include other content like image b64 * fix onedrive * fix: discovery url * fix: default permissions not being loaded * fix: ai hallucination * fix: non rich text input copy * refac: rm print statements * refac: disable direct models from model editors * refac/fix: do not process xlsx files with azure doc intelligence * Update pull_request_template.md * Update generated image translation in DE-de * added missing danish translations * feat(onedrive): Enable search and "My Organization" pivot * style(onedrive): Formatting fix * feat: Implement toggling for vertical and horizontal flow layouts This commit introduces the necessary logic and UI controls to allow users to switch the Flow component layout between vertical and horizontal orientations. * **`Flow.svelte` Refactoring:** * Updates logic for calculating level offsets and node positions to consistently respect the current flow orientation. * Adds a control panel using `` and `` components. * Provides user interface elements to easily switch the flow layout between horizontal and vertical orientations. * build(deps): bump pydantic from 2.11.7 to 2.11.9 in /backend Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.11.7 to 2.11.9. - [Release notes](https://github.com/pydantic/pydantic/releases) - [Changelog](https://github.com/pydantic/pydantic/blob/v2.11.9/HISTORY.md) - [Commits](https://github.com/pydantic/pydantic/compare/v2.11.7...v2.11.9) --- updated-dependencies: - dependency-name: pydantic dependency-version: 2.11.9 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * build(deps): bump black from 25.1.0 to 25.9.0 in /backend Bumps [black](https://github.com/psf/black) from 25.1.0 to 25.9.0. - [Release notes](https://github.com/psf/black/releases) - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md) - [Commits](https://github.com/psf/black/compare/25.1.0...25.9.0) --- updated-dependencies: - dependency-name: black dependency-version: 25.9.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump markdown from 3.8.2 to 3.9 in /backend Bumps [markdown](https://github.com/Python-Markdown/markdown) from 3.8.2 to 3.9. - [Release notes](https://github.com/Python-Markdown/markdown/releases) - [Changelog](https://github.com/Python-Markdown/markdown/blob/master/docs/changelog.md) - [Commits](https://github.com/Python-Markdown/markdown/compare/3.8.2...3.9.0) --- updated-dependencies: - dependency-name: markdown dependency-version: '3.9' dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump chromadb from 1.0.20 to 1.1.0 in /backend Bumps [chromadb](https://github.com/chroma-core/chroma) from 1.0.20 to 1.1.0. - [Release notes](https://github.com/chroma-core/chroma/releases) - [Changelog](https://github.com/chroma-core/chroma/blob/main/RELEASE_PROCESS.md) - [Commits](https://github.com/chroma-core/chroma/compare/1.0.20...1.1.0) --- updated-dependencies: - dependency-name: chromadb dependency-version: 1.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * build(deps): bump opentelemetry-api from 1.36.0 to 1.37.0 Bumps [opentelemetry-api](https://github.com/open-telemetry/opentelemetry-python) from 1.36.0 to 1.37.0. 
- [Release notes](https://github.com/open-telemetry/opentelemetry-python/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-python/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-python/compare/v1.36.0...v1.37.0) --- updated-dependencies: - dependency-name: opentelemetry-api dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * refac: ollama embed form data * fix: non rich text handling * fix: oauth client registration * refac * chore: dep bump * chore: fastapi bump * chore/refac: bump bcrypt and remove passlib * Improving Korean Translation * refac * Improving Korean Translation * feat: PWA share_target implementation Co-Authored-By: gjveld <19951982+gjveld@users.noreply.github.com> * refac: message input mobile detection behaviour * feat: model_ids per folder * Update translation.json (pt-BR) inclusion of new translations of items that have been added * refac * refac * refac * refac * refac/fix: temp chat * refac * refac: stop task * refac/fix: azure audio escape * refac: external tool validation * refac/enh: start.sh additional args support * refac * refac: styling * refac/fix: direct connection floating action buttons * refac/fix: system prompt duplication * refac/enh: openai tts additional params support * refac * feat: load data in parallel to accelerate page loading speed * i18n: improve Chinese translation * refac * refac: model selector * UPD: i18n es-ES Translation v0.6.33 UPD: i18n es-ES Translation v0.6.33 Updated new strings. 
* refac * improved query perf by querying only relevant columns * refac/enh: docling params * refac * refac: openai additional headers support * refac * FEAT: Add Vega Chart Visualizer Renderer ### FEAT: Add Vega Chart Visualizer Renderer Feature required in https://github.com/open-webui/open-webui/discussions/18022 Added npm vega lib to package.json Added function for visualization renderer to src/libs/utils/index.ts Added logic to src/lib/components/chat/Messages/CodeBlock.svelte The treatment is similar to that for mermaid diagrams. Reference: https://vega.github.io/vega/ * refac * chore * refac * FEAT: Add Vega-Lite Chart Visualizer Renderer ### FEAT: Add Vega Chart Visualizer Renderer Add support for Vega-Lite Specifications. Vega-Lite is a "compiled" version of the Vega Chart Visualizer. To be rendered with Vega, it has to be compiled. This PR adds the check and compiles if necessary; it is a complement to the recently added Vega Renderer feature. * refac * refac/fix: switch * enh/refac: url input handling * refac * refac: styling * UPD: Add Validators & Error Toast for Mermaid & Vega diagrams ### UPD: Feat: Add Validators & Error Toast for Mermaid & Vega diagrams Description: As the diagrams generated or entered often have syntax errors, the diagrams are not rendered due to those errors, but as there isn't any notification it is difficult to know what happened. This PR adds a validator and toast notification on errors in Mermaid and Vega/Vega-Lite diagrams, helping the user to fix them. 
* removed redundant knowledge API call * Fix Code Format * refac: model workspace view * refac * refac: knowledge * refac: prompts * refac: tools * refac * feat: attach folder * refac: make tencentcloud-sdk-python optional * refac/fix: oauth * enh: ENABLE_OAUTH_EMAIL_FALLBACK * refac/fix: folders * Update requirements.txt * Update pyproject.toml * UPD: Add Validators & Error Toast for Mermaid & Vega diagrams ### UPD: Feat: Add Validators & Error Toast for Mermaid & Vega diagrams Description: As the diagrams generated or entered often have syntax errors, the diagrams are not rendered due to those errors, but as there isn't any notification it is difficult to know what happened. This PR adds a validator and toast notification on errors in Mermaid and Vega/Vega-Lite diagrams, helping the user to fix them. Note: Another possibility of integrating this Graph Visualizer is through its svelte component: https://github.com/vega/svelte-vega/tree/main/packages/svelte-vega * Removed unused toast import & Code Format * refac * refac: external tool server view * refac * refac: overview * refac: styling * refac * Update bug_report.yaml * refac * refac * refac * refac * refac: oauth client fallback * Fixed: Cannot handle batch sizes > 1 if no padding token is defined Fixes Cannot handle batch sizes > 1 if no padding token is defined For reranker models that do not have this defined in their config by using the eos_token_id if present as pad_token_id. * refac: fallback to reasoning content * fix(i18n): corrected typo in Spanish translation for "Reasoning Tags" Typo fixed in Spanish translation file at line 1240 of `open-webui/src/lib/i18n/locales/es-ES/translation.json`: - Incorrect: "Eriquetas de Razonamiento" - Correct: "Etiquetas de Razonamiento" This improves clarity and consistency in the UI. 
* refac/fix: ENABLE_STAR_SESSIONS_MIDDLEWARE * refac/fix: redirect * refac * refac * refac * refac: web search error handling * refac: source parsing * refac: functions * refac * refac/enh: note pdf export * refac/fix: mcp oauth2.1 * chore: format * chore: Changelog (#17995) * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * Update CHANGELOG.md * refac * chore: dep bump --------- Signed-off-by: dependabot[bot] Co-authored-by: silentoplayz Co-authored-by: Shirasawa <764798966@qq.com> Co-authored-by: Jan Kessler Co-authored-by: Jacob Leksan Co-authored-by: Classic298 <27028174+Classic298@users.noreply.github.com> Co-authored-by: sinejespersen Co-authored-by: Selene Blok Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Cyp Co-authored-by: gjveld <19951982+gjveld@users.noreply.github.com> Co-authored-by: joaoback <156559121+joaoback@users.noreply.github.com> Co-authored-by: _00_ <131402327+rgaricano@users.noreply.github.com> Co-authored-by: expruc Co-authored-by: YetheSamartaka <55753928+YetheSamartaka@users.noreply.github.com> Co-authored-by: Akutangulo --- .github/ISSUE_TEMPLATE/bug_report.yaml | 2 + .github/pull_request_template.md | 9 +- CHANGELOG.md | 69 ++ backend/open_webui/config.py | 50 +- backend/open_webui/env.py | 10 +- backend/open_webui/main.py | 97 +- backend/open_webui/models/files.py | 4 +- backend/open_webui/models/functions.py | 28 +- backend/open_webui/retrieval/loaders/main.py | 4 +- 
.../open_webui/retrieval/loaders/youtube.py | 7 + backend/open_webui/retrieval/utils.py | 46 +- backend/open_webui/retrieval/web/utils.py | 3 +- backend/open_webui/routers/audio.py | 18 +- backend/open_webui/routers/channels.py | 58 +- backend/open_webui/routers/configs.py | 4 +- backend/open_webui/routers/folders.py | 55 +- backend/open_webui/routers/functions.py | 6 + backend/open_webui/routers/ollama.py | 4 + backend/open_webui/routers/openai.py | 3 + backend/open_webui/routers/retrieval.py | 99 +- backend/open_webui/socket/main.py | 22 +- backend/open_webui/tasks.py | 5 +- backend/open_webui/utils/auth.py | 25 +- backend/open_webui/utils/chat.py | 6 +- backend/open_webui/utils/middleware.py | 300 +++--- backend/open_webui/utils/misc.py | 16 + backend/open_webui/utils/oauth.py | 76 +- backend/open_webui/utils/payload.py | 19 +- backend/open_webui/utils/tools.py | 4 - backend/requirements.txt | 87 +- backend/start.sh | 15 +- package-lock.json | 681 ++++++++++++- package.json | 4 +- pyproject.toml | 28 +- src/lib/apis/functions/index.ts | 31 + src/lib/components/AddConnectionModal.svelte | 65 +- src/lib/components/AddToolServerModal.svelte | 13 +- src/lib/components/admin/Functions.svelte | 898 +++++++++--------- .../admin/Functions/AddFunctionMenu.svelte | 10 +- .../components/admin/Settings/Audio.svelte | 33 +- .../admin/Settings/Documents.svelte | 15 + .../Models/Manage/ManageOllama.svelte | 380 +++++--- .../components/admin/Settings/Tools.svelte | 2 +- src/lib/components/admin/Users/Groups.svelte | 10 +- .../admin/Users/UserList/EditUserModal.svelte | 6 +- src/lib/components/channel/Channel.svelte | 6 +- .../components/channel/MessageInput.svelte | 12 +- src/lib/components/channel/Thread.svelte | 6 +- src/lib/components/chat/Chat.svelte | 94 +- src/lib/components/chat/ChatControls.svelte | 251 ++--- .../chat/ChatControls/Embeds.svelte | 9 +- .../ContentRenderer/FloatingButtons.svelte | 27 +- src/lib/components/chat/MessageInput.svelte | 14 +- 
.../MessageInput/Commands/Knowledge.svelte | 31 +- .../chat/MessageInput/IntegrationsMenu.svelte | 26 +- .../components/chat/Messages/Citations.svelte | 19 +- .../components/chat/Messages/CodeBlock.svelte | 34 +- .../chat/Messages/ResponseMessage.svelte | 3 +- .../ResponseMessage/RegenerateMenu.svelte | 2 +- .../ResponseMessage/StatusHistory.svelte | 21 +- src/lib/components/chat/ModelSelector.svelte | 6 +- .../chat/ModelSelector/Selector.svelte | 20 + src/lib/components/chat/Navbar.svelte | 2 +- src/lib/components/chat/Overview.svelte | 196 +--- src/lib/components/chat/Overview/Flow.svelte | 22 +- src/lib/components/chat/Overview/View.svelte | 207 ++++ src/lib/components/chat/Placeholder.svelte | 9 +- .../chat/Placeholder/FolderTitle.svelte | 22 +- .../Settings/Account/UserProfileImage.svelte | 4 +- .../chat/Settings/Tools/Connection.svelte | 39 +- src/lib/components/common/CodeEditor.svelte | 1 - .../components/common/ConfirmDialog.svelte | 8 +- src/lib/components/common/FileItem.svelte | 3 + .../components/common/RichTextInput.svelte | 47 +- src/lib/components/common/Switch.svelte | 10 +- .../components/icons/AlignHorizontal.svelte | 21 + src/lib/components/icons/AlignVertical.svelte | 21 + src/lib/components/layout/Navbar/Menu.svelte | 26 +- src/lib/components/layout/Sidebar.svelte | 116 +-- .../layout/Sidebar/RecursiveFolder.svelte | 19 +- src/lib/components/notes/NoteEditor.svelte | 45 +- src/lib/components/workspace/Knowledge.svelte | 252 +++-- src/lib/components/workspace/Models.svelte | 672 ++++++------- .../workspace/Models/ModelEditor.svelte | 2 +- .../workspace/Models/ModelMenu.svelte | 9 +- src/lib/components/workspace/Prompts.svelte | 433 ++++----- src/lib/components/workspace/Tools.svelte | 563 +++++------ .../workspace/Tools/AddToolMenu.svelte | 10 +- .../workspace/common/TagSelector.svelte | 106 +++ .../workspace/common/ViewSelector.svelte | 96 ++ src/lib/i18n/locales/ar-BH/translation.json | 32 +- src/lib/i18n/locales/ar/translation.json | 32 +- 
src/lib/i18n/locales/bg-BG/translation.json | 32 +- src/lib/i18n/locales/bn-BD/translation.json | 32 +- src/lib/i18n/locales/bo-TB/translation.json | 32 +- src/lib/i18n/locales/bs-BA/translation.json | 32 +- src/lib/i18n/locales/ca-ES/translation.json | 32 +- src/lib/i18n/locales/ceb-PH/translation.json | 32 +- src/lib/i18n/locales/cs-CZ/translation.json | 32 +- src/lib/i18n/locales/da-DK/translation.json | 40 +- src/lib/i18n/locales/de-DE/translation.json | 82 +- src/lib/i18n/locales/dg-DG/translation.json | 32 +- src/lib/i18n/locales/el-GR/translation.json | 32 +- src/lib/i18n/locales/en-GB/translation.json | 32 +- src/lib/i18n/locales/en-US/translation.json | 32 +- src/lib/i18n/locales/es-ES/translation.json | 62 +- src/lib/i18n/locales/et-EE/translation.json | 32 +- src/lib/i18n/locales/eu-ES/translation.json | 32 +- src/lib/i18n/locales/fa-IR/translation.json | 32 +- src/lib/i18n/locales/fi-FI/translation.json | 32 +- src/lib/i18n/locales/fr-CA/translation.json | 32 +- src/lib/i18n/locales/fr-FR/translation.json | 32 +- src/lib/i18n/locales/gl-ES/translation.json | 32 +- src/lib/i18n/locales/he-IL/translation.json | 32 +- src/lib/i18n/locales/hi-IN/translation.json | 32 +- src/lib/i18n/locales/hr-HR/translation.json | 32 +- src/lib/i18n/locales/hu-HU/translation.json | 32 +- src/lib/i18n/locales/id-ID/translation.json | 32 +- src/lib/i18n/locales/ie-GA/translation.json | 32 +- src/lib/i18n/locales/it-IT/translation.json | 32 +- src/lib/i18n/locales/ja-JP/translation.json | 32 +- src/lib/i18n/locales/ka-GE/translation.json | 32 +- src/lib/i18n/locales/kab-DZ/translation.json | 32 +- src/lib/i18n/locales/ko-KR/translation.json | 130 +-- src/lib/i18n/locales/lt-LT/translation.json | 32 +- src/lib/i18n/locales/ms-MY/translation.json | 32 +- src/lib/i18n/locales/nb-NO/translation.json | 32 +- src/lib/i18n/locales/nl-NL/translation.json | 32 +- src/lib/i18n/locales/pa-IN/translation.json | 32 +- src/lib/i18n/locales/pl-PL/translation.json | 32 +- 
src/lib/i18n/locales/pt-BR/translation.json | 68 +- src/lib/i18n/locales/pt-PT/translation.json | 32 +- src/lib/i18n/locales/ro-RO/translation.json | 32 +- src/lib/i18n/locales/ru-RU/translation.json | 32 +- src/lib/i18n/locales/sk-SK/translation.json | 32 +- src/lib/i18n/locales/sr-RS/translation.json | 32 +- src/lib/i18n/locales/sv-SE/translation.json | 32 +- src/lib/i18n/locales/th-TH/translation.json | 32 +- src/lib/i18n/locales/tk-TM/translation.json | 32 +- src/lib/i18n/locales/tr-TR/translation.json | 32 +- src/lib/i18n/locales/ug-CN/translation.json | 32 +- src/lib/i18n/locales/uk-UA/translation.json | 32 +- src/lib/i18n/locales/ur-PK/translation.json | 32 +- .../i18n/locales/uz-Cyrl-UZ/translation.json | 32 +- .../i18n/locales/uz-Latn-Uz/translation.json | 32 +- src/lib/i18n/locales/vi-VN/translation.json | 32 +- src/lib/i18n/locales/zh-CN/translation.json | 52 +- src/lib/i18n/locales/zh-TW/translation.json | 42 +- src/lib/utils/index.ts | 64 +- src/lib/utils/onedrive-file-picker.ts | 16 +- src/routes/(app)/+layout.svelte | 211 ++-- src/routes/(app)/workspace/+layout.svelte | 5 +- .../(app)/workspace/knowledge/+page.svelte | 16 +- .../workspace/models/create/+page.svelte | 14 +- src/routes/+layout.svelte | 12 +- src/routes/auth/+page.svelte | 2 +- 156 files changed, 6119 insertions(+), 3198 deletions(-) create mode 100644 src/lib/components/chat/Overview/View.svelte create mode 100644 src/lib/components/icons/AlignHorizontal.svelte create mode 100644 src/lib/components/icons/AlignVertical.svelte create mode 100644 src/lib/components/workspace/common/TagSelector.svelte create mode 100644 src/lib/components/workspace/common/ViewSelector.svelte diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml index 1a1f0d1f4f..5be1ac21b3 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -13,6 +13,8 @@ body: - **Before submitting a bug report**: Please check the 
[Issues](https://github.com/open-webui/open-webui/issues) and [Discussions](https://github.com/open-webui/open-webui/discussions) sections to see if a similar issue has already been reported. If unsure, start a discussion first, as this helps us efficiently focus on improving the project. Duplicates may be closed without notice. **Please search for existing issues and discussions.** + - Check for opened, **but also for (recently) CLOSED issues** as the issue you are trying to report **might already have been fixed!** + - **Respectful collaboration**: Open WebUI is a volunteer-driven project with a single maintainer and contributors who also have full-time jobs. Please be constructive and respectful in your communication. - **Contributing**: If you encounter an issue, consider submitting a pull request or forking the project. We prioritize preventing contributor burnout to maintain Open WebUI's quality. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index fa82ae26a1..0ec871f328 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,14 +4,15 @@ **Before submitting, make sure you've checked the following:** -- [ ] **Target branch:** Please verify that the pull request targets the `dev` branch. +- [ ] **Target branch:** Verify that the pull request targets the `dev` branch. Not targeting the `dev` branch may lead to immediate closure of the PR. - [ ] **Description:** Provide a concise description of the changes made in this pull request. - [ ] **Changelog:** Ensure a changelog entry following the format of [Keep a Changelog](https://keepachangelog.com/) is added at the bottom of the PR description. -- [ ] **Documentation:** Have you updated relevant documentation [Open WebUI Docs](https://github.com/open-webui/docs), or other documentation sources? 
+- [ ] **Documentation:** If necessary, update relevant documentation [Open WebUI Docs](https://github.com/open-webui/docs) like environment variables, the tutorials, or other documentation sources. - [ ] **Dependencies:** Are there any new dependencies? Have you updated the dependency versions in the documentation? -- [ ] **Testing:** Have you written and run sufficient tests to validate the changes? +- [ ] **Testing:** Perform manual tests to verify the implemented fix/feature works as intended AND does not break any other functionality. Take this as an opportunity to make screenshots of the feature/fix and include it in the PR description. +- [ ] **Agentic AI Code:**: Confirm this Pull Request is **not written by any AI Agent** or has at least gone through additional human review **and** manual testing. If any AI Agent is the co-author of this PR, it may lead to immediate closure of the PR. - [ ] **Code review:** Have you performed a self-review of your code, addressing any coding standard issues and ensuring adherence to the project's coding standards? -- [ ] **Prefix:** To clearly categorize this pull request, prefix the pull request title using one of the following: +- [ ] **Title Prefix:** To clearly categorize this pull request, prefix the pull request title using one of the following: - **BREAKING CHANGE**: Significant changes that may affect compatibility - **build**: Changes that affect the build system or external dependencies - **ci**: Changes to our continuous integration processes or workflows diff --git a/CHANGELOG.md b/CHANGELOG.md index a69bb9dace..2e4938dfe1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,75 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.6.33] - 2025-10-08 + +### Added + +- 🎨 Workspace interface received a comprehensive redesign across Models, Knowledge, Prompts, and Tools sections, featuring reorganized controls, view filters for created vs shared items, tag selectors, improved visual hierarchy, and streamlined import/export functionality. [Commit](https://github.com/open-webui/open-webui/commit/2c59a288603d8c5f004f223ee00fef37cc763a8e), [Commit](https://github.com/open-webui/open-webui/commit/6050c86ab6ef6b8c96dd3f99c62a6867011b67a4), [Commit](https://github.com/open-webui/open-webui/commit/96ecb47bc71c072aa34ef2be10781b042bef4e8c), [Commit](https://github.com/open-webui/open-webui/commit/2250d102b28075a9611696e911536547abb8b38a), [Commit](https://github.com/open-webui/open-webui/commit/23c8f6d507bfee75ab0015a3e2972d5c26f7e9bf), [Commit](https://github.com/open-webui/open-webui/commit/a743b16728c6ae24b8befbc2d7f24eb9e20c4ad5) +- 🛠️ Functions admin interface received a comprehensive redesign with creator attribution display, ownership filters for created vs shared items, improved organization, and refined styling. [Commit](https://github.com/open-webui/open-webui/commit/f5e1a42f51acc0b9d5b63a33c1ca2e42470239c1) +- ⚡ Page initialization performance is significantly improved through parallel data loading and optimized folder API calls, reducing initial page load time. [#17559](https://github.com/open-webui/open-webui/pull/17559), [#17889](https://github.com/open-webui/open-webui/pull/17889) +- ⚡ Chat overview component is now dynamically loaded on demand, reducing initial page bundle size by approximately 470KB and improving first-screen loading speed. [#17595](https://github.com/open-webui/open-webui/pull/17595) +- 📁 Folders can now be attached to chats using the "#" command, automatically expanding to include all files within the folder for streamlined knowledge base integration. 
[Commit](https://github.com/open-webui/open-webui/commit/d2cb78179d66dc85188172a08622d4c97a2ea1ee) +- 📱 Progressive Web App now supports Android share target functionality, allowing users to share web pages, YouTube videos, and text directly to Open WebUI from the system share menu. [#17633](https://github.com/open-webui/open-webui/pull/17633), [#17125](https://github.com/open-webui/open-webui/issues/17125) +- 🗄️ Redis session storage is now available as an experimental option for OAuth authentication flows via the ENABLE_STAR_SESSIONS_MIDDLEWARE environment variable, providing shared session state across multi-replica deployments to address CSRF errors, though currently only basic Redis setups are supported. [#17223](https://github.com/open-webui/open-webui/pull/17223), [#15373](https://github.com/open-webui/open-webui/issues/15373), [Docs:Commit](https://github.com/open-webui/docs/commit/14052347f165d1b597615370373d7289ce44c7f9) +- 📊 Vega and Vega-Lite chart visualization renderers are now supported in code blocks, enabling inline rendering of data visualizations with automatic compilation of Vega-Lite specifications. [#18033](https://github.com/open-webui/open-webui/pull/18033), [#18040](https://github.com/open-webui/open-webui/pull/18040), [#18022](https://github.com/open-webui/open-webui/issues/18022) +- 🔗 OpenAI connections now support custom HTTP headers, enabling users to configure authentication and routing headers for specific deployment requirements. [#18021](https://github.com/open-webui/open-webui/pull/18021), [#9732](https://github.com/open-webui/open-webui/discussions/9732) +- 🔐 OpenID Connect authentication now supports OIDC providers without email scope via the ENABLE_OAUTH_WITHOUT_EMAIL environment variable, enabling compatibility with identity providers that don't expose email addresses. 
[#18047](https://github.com/open-webui/open-webui/pull/18047), [#18045](https://github.com/open-webui/open-webui/issues/18045) +- 🤖 Ollama model management modal now features individual model update cancellation, comprehensive tooltips for all buttons, and streamlined notification behavior to reduce toast spam. [#16863](https://github.com/open-webui/open-webui/pull/16863) +- ☁️ OneDrive file picker now includes search functionality and "My Organization" pivot for business accounts, enabling easier file discovery across organizational content. [#17930](https://github.com/open-webui/open-webui/pull/17930), [#17929](https://github.com/open-webui/open-webui/issues/17929) +- 📊 Chat overview flow diagram now supports toggling between vertical and horizontal layout orientations for improved visualization flexibility. [#17941](https://github.com/open-webui/open-webui/pull/17941) +- 🔊 OpenAI Text-to-Speech engine now supports additional parameters, allowing users to customize TTS behavior with provider-specific options via JSON configuration. [#17985](https://github.com/open-webui/open-webui/issues/17985), [#17188](https://github.com/open-webui/open-webui/pull/17188) +- 🛠️ Tool server list now displays server name, URL, and type (OpenAPI or MCP) for easier identification and management. [#18062](https://github.com/open-webui/open-webui/issues/18062) +- 📁 Folders now remember the last selected model, automatically applying it when starting new chats within that folder. [#17836](https://github.com/open-webui/open-webui/issues/17836) +- 🔢 Ollama embedding endpoint now supports the optional dimensions parameter for controlling embedding output size, compatible with Ollama v0.11.11 and later. [#17942](https://github.com/open-webui/open-webui/pull/17942) +- ⚡ Workspace knowledge page load time is improved by removing redundant API calls, enhancing overall responsiveness. 
[#18057](https://github.com/open-webui/open-webui/pull/18057) +- ⚡ File metadata query performance is enhanced by selecting only relevant columns instead of retrieving entire records, reducing database overhead. [#18013](https://github.com/open-webui/open-webui/pull/18013) +- 📄 Note PDF exports now include titles and properly render in dark mode with appropriate background colors. [Commit](https://github.com/open-webui/open-webui/commit/216fb5c3db1a223ffe6e72d97aa9551fe0e2d028) +- 📄 Docling document extraction now supports additional parameters for VLM pipeline configuration, enabling customized vision model settings. [#17363](https://github.com/open-webui/open-webui/pull/17363) +- ⚙️ Server startup script now supports passing arbitrary arguments to uvicorn, enabling custom server configuration options. [#17919](https://github.com/open-webui/open-webui/pull/17919), [#17918](https://github.com/open-webui/open-webui/issues/17918) +- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security. +- 🌐 Translations for German, Danish, Spanish, Korean, Portuguese (Brazil), Simplified Chinese, and Traditional Chinese were enhanced and expanded. + +### Fixed + +- 💬 System prompts are no longer duplicated in chat requests, eliminating confusion and excessive token usage caused by repeated instructions being sent to models. [#17198](https://github.com/open-webui/open-webui/issues/17198), [#16855](https://github.com/open-webui/open-webui/issues/16855) +- 🔐 MCP OAuth 2.1 authentication now complies with the standard by implementing PKCE with S256 code challenge method and explicitly passing client credentials during token authorization, resolving "code_challenge: Field required" and "client_id: Field required" errors when connecting to OAuth-secured MCP servers. 
[Commit](https://github.com/open-webui/open-webui/commit/911a114ad459f5deebd97543c13c2b90196efb54), [#18010](https://github.com/open-webui/open-webui/issues/18010), [#18087](https://github.com/open-webui/open-webui/pull/18087) +- 🔐 OAuth signup flow now handles password hashing correctly by migrating from passlib to native bcrypt, preventing failures when passwords exceed 72 bytes. [#17917](https://github.com/open-webui/open-webui/issues/17917) +- 🔐 OAuth token refresh errors are resolved by properly registering and storing OAuth clients, fixing "Constructor parameter should be str" exceptions for Google, Microsoft, and OIDC providers. [#17829](https://github.com/open-webui/open-webui/issues/17829) +- 🔐 OAuth server metadata URL is now correctly accessed via the proper attribute, fixing automatic token refresh and logout functionality for Microsoft OAuth provider when OPENID_PROVIDER_URL is not set. [#18065](https://github.com/open-webui/open-webui/pull/18065) +- 🔐 OAuth credential decryption failures now allow the application to start gracefully with clear error messages instead of crashing, preventing complete service outages when WEBUI_SECRET_KEY mismatches occur during database migrations or environment changes. [#18094](https://github.com/open-webui/open-webui/pull/18094), [#18092](https://github.com/open-webui/open-webui/issues/18092) +- 🔐 OAuth 2.1 server discovery now correctly attempts all configured discovery URLs in sequence instead of only trying the first URL. [#17906](https://github.com/open-webui/open-webui/pull/17906), [#17904](https://github.com/open-webui/open-webui/issues/17904), [#18026](https://github.com/open-webui/open-webui/pull/18026) +- 🔐 Login redirect now correctly honors the redirect query parameter after authentication, ensuring users are returned to their intended destination with query parameters intact instead of defaulting to the homepage. 
[#18071](https://github.com/open-webui/open-webui/issues/18071) +- ☁️ OneDrive Business integration authentication regression is resolved, ensuring the popup now properly triggers when connecting to OneDrive accounts. [#17902](https://github.com/open-webui/open-webui/pull/17902), [#17825](https://github.com/open-webui/open-webui/discussions/17825), [#17816](https://github.com/open-webui/open-webui/issues/17816) +- 👥 Default group settings now persist correctly after page navigation, ensuring configuration changes are properly saved and retained. [#17899](https://github.com/open-webui/open-webui/issues/17899), [#18003](https://github.com/open-webui/open-webui/issues/18003) +- 📁 Folder data integrity is now verified on retrieval, automatically fixing orphaned folders with invalid parent references and ensuring proper cascading deletion of nested folder structures. [Commit](https://github.com/open-webui/open-webui/commit/5448618dd5ea181b9635b77040cef60926a902ff) +- 🗄️ Redis Sentinel and Redis Cluster configurations with the experimental ENABLE_STAR_SESSIONS_MIDDLEWARE feature are now properly isolated by making the feature opt-in only, preventing ReadOnlyError failures when connecting to read replicas in multi-node Redis deployments. [#18073](https://github.com/open-webui/open-webui/issues/18073) +- 📊 Mermaid and Vega diagram rendering now displays error toast notifications when syntax errors are detected, helping users identify and fix diagram issues instead of silently failing. [#18068](https://github.com/open-webui/open-webui/pull/18068) +- 🤖 Reasoning models that return reasoning_content instead of content no longer cause NoneType errors during chat title generation, follow-up suggestions, and tag generation. [#18080](https://github.com/open-webui/open-webui/pull/18080) +- 📚 Citation rendering now correctly handles multiple source references in a single bracket, parsing formats like [1,2] and [1, 2] into separate clickable citation links. 
[#18120](https://github.com/open-webui/open-webui/pull/18120) +- 🔍 Web search now handles individual source failures gracefully, continuing to process remaining sources instead of failing entirely when a single URL is unreachable or returns an error. [Commit](https://github.com/open-webui/open-webui/commit/e000494e488090c5f66989a2b3f89d3eaeb7946b), [Commit](https://github.com/open-webui/open-webui/commit/53e98620bff38ab9280aee5165af0a704bdd99b9) +- 🔍 Hybrid search with reranking now handles empty result sets gracefully instead of crashing with ValueError when all results are filtered out due to relevance thresholds. [#18096](https://github.com/open-webui/open-webui/issues/18096) +- 🔍 Reranking models without defined padding tokens now work correctly by automatically falling back to eos_token_id as pad_token_id, fixing "Cannot handle batch sizes > 1" errors for models like Qwen3-Reranker. [#18108](https://github.com/open-webui/open-webui/pull/18108), [#16027](https://github.com/open-webui/open-webui/discussions/16027) +- 🔍 Model selector search now correctly returns results for non-admin users by dynamically updating the search index when the model list changes, fixing a race condition that caused empty search results. [#17996](https://github.com/open-webui/open-webui/pull/17996), [#17960](https://github.com/open-webui/open-webui/pull/17960) +- ⚡ Task model function calling performance is improved by excluding base64 image data from payloads, significantly reducing token count and memory usage when images are present in conversations. [#17897](https://github.com/open-webui/open-webui/pull/17897) +- 🤖 Text selection "Ask" action now correctly recognizes and uses local models configured via direct connections instead of only showing external provider models. 
[#17896](https://github.com/open-webui/open-webui/issues/17896) +- 🛑 Task cancellation API now returns accurate response status, correctly reporting successful cancellations instead of incorrectly indicating failures. [#17920](https://github.com/open-webui/open-webui/issues/17920) +- 💬 Follow-up query suggestions are now generated and displayed in temporary chats, matching the behavior of saved chats. [#14987](https://github.com/open-webui/open-webui/issues/14987) +- 🔊 Azure Text-to-Speech now properly escapes special characters like ampersands in SSML, preventing HTTP 400 errors and ensuring audio generation succeeds for all text content. [#17962](https://github.com/open-webui/open-webui/issues/17962) +- 🛠️ OpenAPI tool server calls with optional parameters now execute successfully even when no arguments are provided, removing the incorrect requirement for a request body. [#18036](https://github.com/open-webui/open-webui/issues/18036) +- 🛠️ MCP mode tool server connections no longer incorrectly validate the OpenAPI path field, allowing seamless switching between OpenAPI and MCP connection types. [#17989](https://github.com/open-webui/open-webui/pull/17989), [#17988](https://github.com/open-webui/open-webui/issues/17988) +- 🛠️ Third-party tool responses containing non-UTF8 or invalid byte sequences are now handled gracefully without causing request failures. [#17882](https://github.com/open-webui/open-webui/pull/17882) +- 🎨 Workspace filter dropdown now correctly renders model tags as strings instead of displaying individual characters, fixing broken filtering interface when models have multiple tags. [#18034](https://github.com/open-webui/open-webui/issues/18034) +- ⌨️ Ctrl+Enter keyboard shortcut now correctly sends messages in mobile and narrow browser views on Chrome instead of inserting newlines. 
[#17975](https://github.com/open-webui/open-webui/issues/17975) +- ⌨️ Tab characters are now preserved when pasting code or formatted text into the chat input box in plain text mode. [#17958](https://github.com/open-webui/open-webui/issues/17958) +- 📋 Text selection copying from the chat input box now correctly copies only the selected text instead of the entire textbox content. [#17911](https://github.com/open-webui/open-webui/issues/17911) +- 🔍 Web search query logging now uses debug level instead of info level, preventing user search queries from appearing in production logs. [#17888](https://github.com/open-webui/open-webui/pull/17888) +- 📝 Debug print statements in middleware were removed to prevent excessive log pollution and respect configured logging levels. [#17943](https://github.com/open-webui/open-webui/issues/17943) + +### Changed + +- 🗄️ Milvus vector database dependency is updated from pymilvus 2.5.0 to 2.6.2, ensuring compatibility with newer Milvus versions but requiring users on older Milvus instances to either upgrade their database or manually downgrade the pymilvus package. 
[#18066](https://github.com/open-webui/open-webui/pull/18066) + ## [0.6.32] - 2025-09-29 ### Added diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 3a4309438a..bd73807621 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -605,8 +605,8 @@ def load_oauth_providers(): OAUTH_PROVIDERS.clear() if GOOGLE_CLIENT_ID.value and GOOGLE_CLIENT_SECRET.value: - def google_oauth_register(client: OAuth): - client.register( + def google_oauth_register(oauth: OAuth): + client = oauth.register( name="google", client_id=GOOGLE_CLIENT_ID.value, client_secret=GOOGLE_CLIENT_SECRET.value, @@ -621,6 +621,7 @@ def load_oauth_providers(): }, redirect_uri=GOOGLE_REDIRECT_URI.value, ) + return client OAUTH_PROVIDERS["google"] = { "redirect_uri": GOOGLE_REDIRECT_URI.value, @@ -633,8 +634,8 @@ def load_oauth_providers(): and MICROSOFT_CLIENT_TENANT_ID.value ): - def microsoft_oauth_register(client: OAuth): - client.register( + def microsoft_oauth_register(oauth: OAuth): + client = oauth.register( name="microsoft", client_id=MICROSOFT_CLIENT_ID.value, client_secret=MICROSOFT_CLIENT_SECRET.value, @@ -649,6 +650,7 @@ def load_oauth_providers(): }, redirect_uri=MICROSOFT_REDIRECT_URI.value, ) + return client OAUTH_PROVIDERS["microsoft"] = { "redirect_uri": MICROSOFT_REDIRECT_URI.value, @@ -658,8 +660,8 @@ def load_oauth_providers(): if GITHUB_CLIENT_ID.value and GITHUB_CLIENT_SECRET.value: - def github_oauth_register(client: OAuth): - client.register( + def github_oauth_register(oauth: OAuth): + client = oauth.register( name="github", client_id=GITHUB_CLIENT_ID.value, client_secret=GITHUB_CLIENT_SECRET.value, @@ -677,6 +679,7 @@ def load_oauth_providers(): }, redirect_uri=GITHUB_CLIENT_REDIRECT_URI.value, ) + return client OAUTH_PROVIDERS["github"] = { "redirect_uri": GITHUB_CLIENT_REDIRECT_URI.value, @@ -690,7 +693,7 @@ def load_oauth_providers(): and OPENID_PROVIDER_URL.value ): - def oidc_oauth_register(client: OAuth): + def 
oidc_oauth_register(oauth: OAuth): client_kwargs = { "scope": OAUTH_SCOPES.value, **( @@ -716,7 +719,7 @@ def load_oauth_providers(): % ("S256", OAUTH_CODE_CHALLENGE_METHOD.value) ) - client.register( + client = oauth.register( name="oidc", client_id=OAUTH_CLIENT_ID.value, client_secret=OAUTH_CLIENT_SECRET.value, @@ -724,6 +727,7 @@ def load_oauth_providers(): client_kwargs=client_kwargs, redirect_uri=OPENID_REDIRECT_URI.value, ) + return client OAUTH_PROVIDERS["oidc"] = { "name": OAUTH_PROVIDER_NAME.value, @@ -733,8 +737,8 @@ def load_oauth_providers(): if FEISHU_CLIENT_ID.value and FEISHU_CLIENT_SECRET.value: - def feishu_oauth_register(client: OAuth): - client.register( + def feishu_oauth_register(oauth: OAuth): + client = oauth.register( name="feishu", client_id=FEISHU_CLIENT_ID.value, client_secret=FEISHU_CLIENT_SECRET.value, @@ -752,6 +756,7 @@ def load_oauth_providers(): }, redirect_uri=FEISHU_REDIRECT_URI.value, ) + return client OAUTH_PROVIDERS["feishu"] = { "register": feishu_oauth_register, @@ -2310,6 +2315,18 @@ DOCLING_SERVER_URL = PersistentConfig( os.getenv("DOCLING_SERVER_URL", "http://docling:5001"), ) +docling_params = os.getenv("DOCLING_PARAMS", "") +try: + docling_params = json.loads(docling_params) +except json.JSONDecodeError: + docling_params = {} + +DOCLING_PARAMS = PersistentConfig( + "DOCLING_PARAMS", + "rag.docling_params", + docling_params, +) + DOCLING_DO_OCR = PersistentConfig( "DOCLING_DO_OCR", "rag.docling_do_ocr", @@ -3361,6 +3378,19 @@ AUDIO_TTS_OPENAI_API_KEY = PersistentConfig( os.getenv("AUDIO_TTS_OPENAI_API_KEY", OPENAI_API_KEY), ) +audio_tts_openai_params = os.getenv("AUDIO_TTS_OPENAI_PARAMS", "") +try: + audio_tts_openai_params = json.loads(audio_tts_openai_params) +except json.JSONDecodeError: + audio_tts_openai_params = {} + +AUDIO_TTS_OPENAI_PARAMS = PersistentConfig( + "AUDIO_TTS_OPENAI_PARAMS", + "audio.tts.openai.params", + audio_tts_openai_params, +) + + AUDIO_TTS_API_KEY = PersistentConfig( "AUDIO_TTS_API_KEY", 
"audio.tts.api_key", diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index e02424f969..8f9c1fbc44 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -212,6 +212,11 @@ ENABLE_FORWARD_USER_INFO_HEADERS = ( os.environ.get("ENABLE_FORWARD_USER_INFO_HEADERS", "False").lower() == "true" ) +# Experimental feature, may be removed in future +ENABLE_STAR_SESSIONS_MIDDLEWARE = ( + os.environ.get("ENABLE_STAR_SESSIONS_MIDDLEWARE", "False").lower() == "true" +) + #################################### # WEBUI_BUILD_HASH #################################### @@ -468,7 +473,9 @@ ENABLE_COMPRESSION_MIDDLEWARE = ( #################################### # OAUTH Configuration #################################### - +ENABLE_OAUTH_EMAIL_FALLBACK = ( + os.environ.get("ENABLE_OAUTH_EMAIL_FALLBACK", "False").lower() == "true" +) ENABLE_OAUTH_ID_TOKEN_COOKIE = ( os.environ.get("ENABLE_OAUTH_ID_TOKEN_COOKIE", "True").lower() == "true" @@ -482,7 +489,6 @@ OAUTH_SESSION_TOKEN_ENCRYPTION_KEY = os.environ.get( "OAUTH_SESSION_TOKEN_ENCRYPTION_KEY", WEBUI_SECRET_KEY ) - #################################### # SCIM Configuration #################################### diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 904399af14..221c20f305 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -8,6 +8,7 @@ import shutil import sys import time import random +import re from uuid import uuid4 @@ -174,13 +175,14 @@ from open_webui.config import ( AUDIO_STT_AZURE_LOCALES, AUDIO_STT_AZURE_BASE_URL, AUDIO_STT_AZURE_MAX_SPEAKERS, - AUDIO_TTS_API_KEY, AUDIO_TTS_ENGINE, AUDIO_TTS_MODEL, + AUDIO_TTS_VOICE, AUDIO_TTS_OPENAI_API_BASE_URL, AUDIO_TTS_OPENAI_API_KEY, + AUDIO_TTS_OPENAI_PARAMS, + AUDIO_TTS_API_KEY, AUDIO_TTS_SPLIT_ON, - AUDIO_TTS_VOICE, AUDIO_TTS_AZURE_SPEECH_REGION, AUDIO_TTS_AZURE_SPEECH_BASE_URL, AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT, @@ -246,6 +248,7 @@ from open_webui.config import ( EXTERNAL_DOCUMENT_LOADER_API_KEY, 
TIKA_SERVER_URL, DOCLING_SERVER_URL, + DOCLING_PARAMS, DOCLING_DO_OCR, DOCLING_FORCE_OCR, DOCLING_OCR_ENGINE, @@ -447,6 +450,7 @@ from open_webui.env import ( ENABLE_OTEL, EXTERNAL_PWA_MANIFEST_URL, AIOHTTP_CLIENT_SESSION_SSL, + ENABLE_STAR_SESSIONS_MIDDLEWARE, ) @@ -834,6 +838,7 @@ app.state.config.EXTERNAL_DOCUMENT_LOADER_URL = EXTERNAL_DOCUMENT_LOADER_URL app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY = EXTERNAL_DOCUMENT_LOADER_API_KEY app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL app.state.config.DOCLING_SERVER_URL = DOCLING_SERVER_URL +app.state.config.DOCLING_PARAMS = DOCLING_PARAMS app.state.config.DOCLING_DO_OCR = DOCLING_DO_OCR app.state.config.DOCLING_FORCE_OCR = DOCLING_FORCE_OCR app.state.config.DOCLING_OCR_ENGINE = DOCLING_OCR_ENGINE @@ -1095,11 +1100,15 @@ app.state.config.AUDIO_STT_AZURE_LOCALES = AUDIO_STT_AZURE_LOCALES app.state.config.AUDIO_STT_AZURE_BASE_URL = AUDIO_STT_AZURE_BASE_URL app.state.config.AUDIO_STT_AZURE_MAX_SPEAKERS = AUDIO_STT_AZURE_MAX_SPEAKERS -app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL -app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY app.state.config.TTS_ENGINE = AUDIO_TTS_ENGINE + app.state.config.TTS_MODEL = AUDIO_TTS_MODEL app.state.config.TTS_VOICE = AUDIO_TTS_VOICE + +app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL +app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY +app.state.config.TTS_OPENAI_PARAMS = AUDIO_TTS_OPENAI_PARAMS + app.state.config.TTS_API_KEY = AUDIO_TTS_API_KEY app.state.config.TTS_SPLIT_ON = AUDIO_TTS_SPLIT_ON @@ -1170,12 +1179,32 @@ class RedirectMiddleware(BaseHTTPMiddleware): path = request.url.path query_params = dict(parse_qs(urlparse(str(request.url)).query)) + redirect_params = {} + # Check for the specific watch path and the presence of 'v' parameter if path.endswith("/watch") and "v" in query_params: # Extract the first 'v' parameter - video_id = query_params["v"][0] - encoded_video_id = urlencode({"youtube": 
video_id}) - redirect_url = f"/?{encoded_video_id}" + youtube_video_id = query_params["v"][0] + redirect_params["youtube"] = youtube_video_id + + if "shared" in query_params and len(query_params["shared"]) > 0: + # PWA share_target support + + text = query_params["shared"][0] + if text: + urls = re.match(r"https://\S+", text) + if urls: + from open_webui.retrieval.loaders.youtube import _parse_video_id + + if youtube_video_id := _parse_video_id(urls[0]): + redirect_params["youtube"] = youtube_video_id + else: + redirect_params["load-url"] = urls[0] + else: + redirect_params["q"] = text + + if redirect_params: + redirect_url = f"/?{urlencode(redirect_params)}" return RedirectResponse(url=redirect_url) # Proceed with the normal flow of other requests @@ -1474,7 +1503,7 @@ async def chat_completion( } if metadata.get("chat_id") and (user and user.role != "admin"): - if metadata["chat_id"] != "local": + if not metadata["chat_id"].startswith("local:"): chat = Chats.get_chat_by_id_and_user_id(metadata["chat_id"], user.id) if chat is None: raise HTTPException( @@ -1501,13 +1530,14 @@ async def chat_completion( response = await chat_completion_handler(request, form_data, user) if metadata.get("chat_id") and metadata.get("message_id"): try: - Chats.upsert_message_to_chat_by_id_and_message_id( - metadata["chat_id"], - metadata["message_id"], - { - "model": model_id, - }, - ) + if not metadata["chat_id"].startswith("local:"): + Chats.upsert_message_to_chat_by_id_and_message_id( + metadata["chat_id"], + metadata["message_id"], + { + "model": model_id, + }, + ) except: pass @@ -1528,13 +1558,14 @@ async def chat_completion( if metadata.get("chat_id") and metadata.get("message_id"): # Update the chat message with the error try: - Chats.upsert_message_to_chat_by_id_and_message_id( - metadata["chat_id"], - metadata["message_id"], - { - "error": {"content": str(e)}, - }, - ) + if not metadata["chat_id"].startswith("local:"): + Chats.upsert_message_to_chat_by_id_and_message_id( + 
metadata["chat_id"], + metadata["message_id"], + { + "error": {"content": str(e)}, + }, + ) event_emitter = get_event_emitter(metadata) await event_emitter( @@ -1903,13 +1934,20 @@ if len(app.state.config.TOOL_SERVER_CONNECTIONS) > 0: "oauth_client_info", "" ) - oauth_client_info = decrypt_data(oauth_client_info) - app.state.oauth_client_manager.add_client( - f"mcp:{server_id}", OAuthClientInformationFull(**oauth_client_info) - ) + try: + oauth_client_info = decrypt_data(oauth_client_info) + app.state.oauth_client_manager.add_client( + f"mcp:{server_id}", + OAuthClientInformationFull(**oauth_client_info), + ) + except Exception as e: + log.error( + f"Error adding OAuth client for MCP tool server {server_id}: {e}" + ) + pass try: - if REDIS_URL: + if ENABLE_STAR_SESSIONS_MIDDLEWARE: redis_session_store = RedisStore( url=REDIS_URL, prefix=(f"{REDIS_KEY_PREFIX}:session:" if REDIS_KEY_PREFIX else "session:"), @@ -2004,6 +2042,11 @@ async def get_manifest_json(): "purpose": "maskable", }, ], + "share_target": { + "action": "/", + "method": "GET", + "params": {"text": "shared"}, + }, } diff --git a/backend/open_webui/models/files.py b/backend/open_webui/models/files.py index bf07b5f86f..c5cbaf91f8 100644 --- a/backend/open_webui/models/files.py +++ b/backend/open_webui/models/files.py @@ -186,7 +186,9 @@ class FilesTable: created_at=file.created_at, updated_at=file.updated_at, ) - for file in db.query(File) + for file in db.query( + File.id, File.meta, File.created_at, File.updated_at + ) .filter(File.id.in_(ids)) .order_by(File.updated_at.desc()) .all() diff --git a/backend/open_webui/models/functions.py b/backend/open_webui/models/functions.py index e8ce3aa811..2020a29633 100644 --- a/backend/open_webui/models/functions.py +++ b/backend/open_webui/models/functions.py @@ -3,7 +3,7 @@ import time from typing import Optional from open_webui.internal.db import Base, JSONField, get_db -from open_webui.models.users import Users +from open_webui.models.users import Users, 
UserModel from open_webui.env import SRC_LOG_LEVELS from pydantic import BaseModel, ConfigDict from sqlalchemy import BigInteger, Boolean, Column, String, Text, Index @@ -76,6 +76,10 @@ class FunctionWithValvesModel(BaseModel): #################### +class FunctionUserResponse(FunctionModel): + user: Optional[UserModel] = None + + class FunctionResponse(BaseModel): id: str user_id: str @@ -203,6 +207,28 @@ class FunctionsTable: FunctionModel.model_validate(function) for function in functions ] + def get_function_list(self) -> list[FunctionUserResponse]: + with get_db() as db: + functions = db.query(Function).order_by(Function.updated_at.desc()).all() + user_ids = list(set(func.user_id for func in functions)) + + users = Users.get_users_by_user_ids(user_ids) if user_ids else [] + users_dict = {user.id: user for user in users} + + return [ + FunctionUserResponse.model_validate( + { + **FunctionModel.model_validate(func).model_dump(), + "user": ( + users_dict.get(func.user_id).model_dump() + if func.user_id in users_dict + else None + ), + } + ) + for func in functions + ] + def get_functions_by_type( self, type: str, active_only=False ) -> list[FunctionModel]: diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index 45f3d8c941..b3d90cc8f3 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -346,11 +346,9 @@ class Loader: self.engine == "document_intelligence" and self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT") != "" and ( - file_ext in ["pdf", "xls", "xlsx", "docx", "ppt", "pptx"] + file_ext in ["pdf", "docx", "ppt", "pptx"] or file_content_type in [ - "application/vnd.ms-excel", - "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", "application/vnd.openxmlformats-officedocument.wordprocessingml.document", "application/vnd.ms-powerpoint", "application/vnd.openxmlformats-officedocument.presentationml.presentation", diff --git 
a/backend/open_webui/retrieval/loaders/youtube.py b/backend/open_webui/retrieval/loaders/youtube.py index 360ef0a6c7..da17eaef65 100644 --- a/backend/open_webui/retrieval/loaders/youtube.py +++ b/backend/open_webui/retrieval/loaders/youtube.py @@ -157,3 +157,10 @@ class YoutubeLoader: f"No transcript found for any of the specified languages: {languages_tried}. Verify if the video has transcripts, add more languages if needed." ) raise NoTranscriptFound(self.video_id, self.language, list(transcript_list)) + + async def aload(self) -> Generator[Document, None, None]: + """Asynchronously load YouTube transcripts into `Document` objects.""" + import asyncio + + loop = asyncio.get_event_loop() + return await loop.run_in_executor(None, self.load) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 65da1592e1..66a7fa2ddf 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -6,6 +6,7 @@ import requests import hashlib from concurrent.futures import ThreadPoolExecutor import time +import re from urllib.parse import quote from huggingface_hub import snapshot_download @@ -16,6 +17,7 @@ from langchain_core.documents import Document from open_webui.config import VECTOR_DB from open_webui.retrieval.vector.factory import VECTOR_DB_CLIENT + from open_webui.models.users import UserModel from open_webui.models.files import Files from open_webui.models.knowledge import Knowledges @@ -27,6 +29,9 @@ from open_webui.retrieval.vector.main import GetResult from open_webui.utils.access_control import has_access from open_webui.utils.misc import get_message_list +from open_webui.retrieval.web.utils import get_web_loader +from open_webui.retrieval.loaders.youtube import YoutubeLoader + from open_webui.env import ( SRC_LOG_LEVELS, @@ -49,6 +54,33 @@ from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.retrievers import BaseRetriever +def is_youtube_url(url: str) -> bool: 
+ youtube_regex = r"^(https?://)?(www\.)?(youtube\.com|youtu\.be)/.+$" + return re.match(youtube_regex, url) is not None + + +def get_loader(request, url: str): + if is_youtube_url(url): + return YoutubeLoader( + url, + language=request.app.state.config.YOUTUBE_LOADER_LANGUAGE, + proxy_url=request.app.state.config.YOUTUBE_LOADER_PROXY_URL, + ) + else: + return get_web_loader( + url, + verify_ssl=request.app.state.config.ENABLE_WEB_LOADER_SSL_VERIFICATION, + requests_per_second=request.app.state.config.WEB_LOADER_CONCURRENT_REQUESTS, + ) + + +def get_content_from_url(request, url: str) -> str: + loader = get_loader(request, url) + docs = loader.load() + content = " ".join([doc.page_content for doc in docs]) + return content, docs + + class VectorSearchRetriever(BaseRetriever): collection_name: Any embedding_function: Any @@ -188,7 +220,11 @@ def query_doc_with_hybrid_search( zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True ) sorted_items = sorted_items[:k] - distances, documents, metadatas = map(list, zip(*sorted_items)) + + if sorted_items: + distances, documents, metadatas = map(list, zip(*sorted_items)) + else: + distances, documents, metadatas = [], [], [] result = { "distances": [distances], @@ -571,6 +607,13 @@ def get_sources_from_items( "metadatas": [[{"file_id": chat.id, "name": chat.title}]], } + elif item.get("type") == "url": + content, docs = get_content_from_url(request, item.get("url")) + if docs: + query_result = { + "documents": [[content]], + "metadatas": [[{"url": item.get("url"), "name": item.get("url")}]], + } elif item.get("type") == "file": if ( item.get("context") == "full" @@ -736,7 +779,6 @@ def get_sources_from_items( sources.append(source) except Exception as e: log.exception(e) - return sources diff --git a/backend/open_webui/retrieval/web/utils.py b/backend/open_webui/retrieval/web/utils.py index 5ba27ee8f0..61356adb56 100644 --- a/backend/open_webui/retrieval/web/utils.py +++ 
b/backend/open_webui/retrieval/web/utils.py @@ -75,7 +75,8 @@ def safe_validate_urls(url: Sequence[str]) -> Sequence[str]: try: if validate_url(u): valid_urls.append(u) - except ValueError: + except Exception as e: + log.debug(f"Invalid URL {u}: {str(e)}") continue return valid_urls diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index 100610a83a..cb7a57b5b7 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -3,6 +3,7 @@ import json import logging import os import uuid +import html from functools import lru_cache from pydub import AudioSegment from pydub.silence import split_on_silence @@ -153,6 +154,7 @@ def set_faster_whisper_model(model: str, auto_update: bool = False): class TTSConfigForm(BaseModel): OPENAI_API_BASE_URL: str OPENAI_API_KEY: str + OPENAI_PARAMS: Optional[dict] = None API_KEY: str ENGINE: str MODEL: str @@ -189,6 +191,7 @@ async def get_audio_config(request: Request, user=Depends(get_admin_user)): "tts": { "OPENAI_API_BASE_URL": request.app.state.config.TTS_OPENAI_API_BASE_URL, "OPENAI_API_KEY": request.app.state.config.TTS_OPENAI_API_KEY, + "OPENAI_PARAMS": request.app.state.config.TTS_OPENAI_PARAMS, "API_KEY": request.app.state.config.TTS_API_KEY, "ENGINE": request.app.state.config.TTS_ENGINE, "MODEL": request.app.state.config.TTS_MODEL, @@ -221,6 +224,7 @@ async def update_audio_config( ): request.app.state.config.TTS_OPENAI_API_BASE_URL = form_data.tts.OPENAI_API_BASE_URL request.app.state.config.TTS_OPENAI_API_KEY = form_data.tts.OPENAI_API_KEY + request.app.state.config.TTS_OPENAI_PARAMS = form_data.tts.OPENAI_PARAMS request.app.state.config.TTS_API_KEY = form_data.tts.API_KEY request.app.state.config.TTS_ENGINE = form_data.tts.ENGINE request.app.state.config.TTS_MODEL = form_data.tts.MODEL @@ -261,12 +265,13 @@ async def update_audio_config( return { "tts": { - "OPENAI_API_BASE_URL": request.app.state.config.TTS_OPENAI_API_BASE_URL, - "OPENAI_API_KEY": 
request.app.state.config.TTS_OPENAI_API_KEY, - "API_KEY": request.app.state.config.TTS_API_KEY, "ENGINE": request.app.state.config.TTS_ENGINE, "MODEL": request.app.state.config.TTS_MODEL, "VOICE": request.app.state.config.TTS_VOICE, + "OPENAI_API_BASE_URL": request.app.state.config.TTS_OPENAI_API_BASE_URL, + "OPENAI_API_KEY": request.app.state.config.TTS_OPENAI_API_KEY, + "OPENAI_PARAMS": request.app.state.config.TTS_OPENAI_PARAMS, + "API_KEY": request.app.state.config.TTS_API_KEY, "SPLIT_ON": request.app.state.config.TTS_SPLIT_ON, "AZURE_SPEECH_REGION": request.app.state.config.TTS_AZURE_SPEECH_REGION, "AZURE_SPEECH_BASE_URL": request.app.state.config.TTS_AZURE_SPEECH_BASE_URL, @@ -336,6 +341,11 @@ async def speech(request: Request, user=Depends(get_verified_user)): async with aiohttp.ClientSession( timeout=timeout, trust_env=True ) as session: + payload = { + **payload, + **(request.app.state.config.TTS_OPENAI_PARAMS or {}), + } + r = await session.post( url=f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech", json=payload, @@ -458,7 +468,7 @@ async def speech(request: Request, user=Depends(get_verified_user)): try: data = f""" - {payload["input"]} + {html.escape(payload["input"])} """ timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT) async with aiohttp.ClientSession( diff --git a/backend/open_webui/routers/channels.py b/backend/open_webui/routers/channels.py index 77c3d9ba53..fda0879594 100644 --- a/backend/open_webui/routers/channels.py +++ b/backend/open_webui/routers/channels.py @@ -340,11 +340,12 @@ async def model_response_handler(request, channel, message, user): if file.get("type", "") == "image": images.append(file.get("url", "")) + thread_history_string = "\n\n".join(thread_history) system_message = { "role": "system", "content": f"You are {model.get('name', model_id)}, participating in a threaded conversation. Be concise and conversational." 
+ ( - f"Here's the thread history:\n\n{''.join([f'{msg}' for msg in thread_history])}\n\nContinue the conversation naturally as {model.get('name', model_id)}, addressing the most recent message while being aware of the full context." + f"Here's the thread history:\n\n\n{thread_history_string}\n\n\nContinue the conversation naturally as {model.get('name', model_id)}, addressing the most recent message while being aware of the full context." if thread_history else "" ), @@ -384,19 +385,34 @@ async def model_response_handler(request, channel, message, user): ) if res: - await update_message_by_id( - channel.id, - response_message.id, - MessageForm( - **{ - "content": res["choices"][0]["message"]["content"], - "meta": { - "done": True, - }, - } - ), - user, - ) + if res.get("choices", []) and len(res["choices"]) > 0: + await update_message_by_id( + channel.id, + response_message.id, + MessageForm( + **{ + "content": res["choices"][0]["message"]["content"], + "meta": { + "done": True, + }, + } + ), + user, + ) + elif res.get("error", None): + await update_message_by_id( + channel.id, + response_message.id, + MessageForm( + **{ + "content": f"Error: {res['error']}", + "meta": { + "done": True, + }, + } + ), + user, + ) except Exception as e: log.info(e) pass @@ -436,7 +452,7 @@ async def new_message_handler( } await sio.emit( - "channel-events", + "events:channel", event_data, to=f"channel:{channel.id}", ) @@ -447,7 +463,7 @@ async def new_message_handler( if parent_message: await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": parent_message.id, @@ -644,7 +660,7 @@ async def update_message_by_id( if message: await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": message.id, @@ -708,7 +724,7 @@ async def add_reaction_to_message( message = Messages.get_message_by_id(message_id) await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": message.id, 
@@ -774,7 +790,7 @@ async def remove_reaction_by_id_and_user_id_and_name( message = Messages.get_message_by_id(message_id) await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": message.id, @@ -839,7 +855,7 @@ async def delete_message_by_id( try: Messages.delete_message_by_id(message_id) await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": message.id, @@ -862,7 +878,7 @@ async def delete_message_by_id( if parent_message: await sio.emit( - "channel-events", + "events:channel", { "channel_id": channel.id, "message_id": parent_message.id, diff --git a/backend/open_webui/routers/configs.py b/backend/open_webui/routers/configs.py index f19fbeedd0..10577e26d6 100644 --- a/backend/open_webui/routers/configs.py +++ b/backend/open_webui/routers/configs.py @@ -213,7 +213,7 @@ async def verify_tool_servers_config( ) async with aiohttp.ClientSession() as session: async with session.get( - discovery_urls[0] + discovery_url ) as oauth_server_metadata_response: if oauth_server_metadata_response.status == 200: try: @@ -234,7 +234,7 @@ async def verify_tool_servers_config( ) raise HTTPException( status_code=400, - detail=f"Failed to parse OAuth 2.1 discovery document from {discovery_urls[0]}", + detail=f"Failed to parse OAuth 2.1 discovery document from {discovery_url}", ) raise HTTPException( diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py index 51c1eba5f4..b242b08e3a 100644 --- a/backend/open_webui/routers/folders.py +++ b/backend/open_webui/routers/folders.py @@ -50,7 +50,15 @@ async def get_folders(user=Depends(get_verified_user)): folders = Folders.get_folders_by_user_id(user.id) # Verify folder data integrity + folder_list = [] for folder in folders: + if folder.parent_id and not Folders.get_folder_by_id_and_user_id( + folder.parent_id, user.id + ): + folder = Folders.update_folder_parent_id_by_id_and_user_id( + folder.id, user.id, None + ) + if 
folder.data: if "files" in folder.data: valid_files = [] @@ -74,12 +82,9 @@ async def get_folders(user=Depends(get_verified_user)): folder.id, user.id, FolderUpdateForm(data=folder.data) ) - return [ - { - **folder.model_dump(), - } - for folder in folders - ] + folder_list.append(FolderNameIdResponse(**folder.model_dump())) + + return folder_list ############################ @@ -265,21 +270,31 @@ async def delete_folder_by_id( detail=ERROR_MESSAGES.ACCESS_PROHIBITED, ) - folder = Folders.get_folder_by_id_and_user_id(id, user.id) - if folder: - try: - folder_ids = Folders.delete_folder_by_id_and_user_id(id, user.id) - for folder_id in folder_ids: - Chats.delete_chats_by_user_id_and_folder_id(user.id, folder_id) + folders = [] + folders.append(Folders.get_folder_by_id_and_user_id(id, user.id)) + while folders: + folder = folders.pop() + if folder: + try: + folder_ids = Folders.delete_folder_by_id_and_user_id(id, user.id) + for folder_id in folder_ids: + Chats.delete_chats_by_user_id_and_folder_id(user.id, folder_id) + + return True + except Exception as e: + log.exception(e) + log.error(f"Error deleting folder: {id}") + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=ERROR_MESSAGES.DEFAULT("Error deleting folder"), + ) + finally: + # Get all subfolders + subfolders = Folders.get_folders_by_parent_id_and_user_id( + folder.id, user.id + ) + folders.extend(subfolders) - return True - except Exception as e: - log.exception(e) - log.error(f"Error deleting folder: {id}") - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=ERROR_MESSAGES.DEFAULT("Error deleting folder"), - ) else: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, diff --git a/backend/open_webui/routers/functions.py b/backend/open_webui/routers/functions.py index c36e656d5f..c8f131553c 100644 --- a/backend/open_webui/routers/functions.py +++ b/backend/open_webui/routers/functions.py @@ -10,6 +10,7 @@ from open_webui.models.functions import ( 
FunctionForm, FunctionModel, FunctionResponse, + FunctionUserResponse, FunctionWithValvesModel, Functions, ) @@ -42,6 +43,11 @@ async def get_functions(user=Depends(get_verified_user)): return Functions.get_functions() +@router.get("/list", response_model=list[FunctionUserResponse]) +async def get_function_list(user=Depends(get_admin_user)): + return Functions.get_function_list() + + ############################ # ExportFunctions ############################ diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index bf11ffa0dd..64b0687afa 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -1020,6 +1020,10 @@ class GenerateEmbedForm(BaseModel): options: Optional[dict] = None keep_alive: Optional[Union[int, str]] = None + model_config = ConfigDict( + extra="allow", + ) + @router.post("/api/embed") @router.post("/api/embed/{url_idx}") diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index e8865b90a0..8c5e3da736 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -190,6 +190,9 @@ async def get_headers_and_cookies( if token: headers["Authorization"] = f"Bearer {token}" + if config.get("headers") and isinstance(config.get("headers"), dict): + headers = {**headers, **config.get("headers")} + return headers, cookies diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index d322addfa6..c79d3ce656 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -5,6 +5,7 @@ import os import shutil import asyncio +import re import uuid from datetime import datetime from pathlib import Path @@ -70,6 +71,7 @@ from open_webui.retrieval.web.firecrawl import search_firecrawl from open_webui.retrieval.web.external import search_external from open_webui.retrieval.utils import ( + get_content_from_url, get_embedding_function, 
get_reranking_function, get_model_path, @@ -189,6 +191,26 @@ def get_rf( log.error(f"CrossEncoder: {e}") raise Exception(ERROR_MESSAGES.DEFAULT("CrossEncoder error")) + # Safely adjust pad_token_id if missing as some models do not have this in config + try: + model_cfg = getattr(rf, "model", None) + if model_cfg and hasattr(model_cfg, "config"): + cfg = model_cfg.config + if getattr(cfg, "pad_token_id", None) is None: + # Fallback to eos_token_id when available + eos = getattr(cfg, "eos_token_id", None) + if eos is not None: + cfg.pad_token_id = eos + log.debug( + f"Missing pad_token_id detected; set to eos_token_id={eos}" + ) + else: + log.warning( + "Neither pad_token_id nor eos_token_id present in model config" + ) + except Exception as e2: + log.warning(f"Failed to adjust pad_token_id on CrossEncoder: {e2}") + return rf @@ -429,6 +451,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "EXTERNAL_DOCUMENT_LOADER_API_KEY": request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY, "TIKA_SERVER_URL": request.app.state.config.TIKA_SERVER_URL, "DOCLING_SERVER_URL": request.app.state.config.DOCLING_SERVER_URL, + "DOCLING_PARAMS": request.app.state.config.DOCLING_PARAMS, "DOCLING_DO_OCR": request.app.state.config.DOCLING_DO_OCR, "DOCLING_FORCE_OCR": request.app.state.config.DOCLING_FORCE_OCR, "DOCLING_OCR_ENGINE": request.app.state.config.DOCLING_OCR_ENGINE, @@ -590,6 +613,7 @@ class ConfigForm(BaseModel): # Content extraction settings CONTENT_EXTRACTION_ENGINE: Optional[str] = None PDF_EXTRACT_IMAGES: Optional[bool] = None + DATALAB_MARKER_API_KEY: Optional[str] = None DATALAB_MARKER_API_BASE_URL: Optional[str] = None DATALAB_MARKER_ADDITIONAL_CONFIG: Optional[str] = None @@ -601,11 +625,13 @@ class ConfigForm(BaseModel): DATALAB_MARKER_FORMAT_LINES: Optional[bool] = None DATALAB_MARKER_USE_LLM: Optional[bool] = None DATALAB_MARKER_OUTPUT_FORMAT: Optional[str] = None + EXTERNAL_DOCUMENT_LOADER_URL: Optional[str] = None 
EXTERNAL_DOCUMENT_LOADER_API_KEY: Optional[str] = None TIKA_SERVER_URL: Optional[str] = None DOCLING_SERVER_URL: Optional[str] = None + DOCLING_PARAMS: Optional[dict] = None DOCLING_DO_OCR: Optional[bool] = None DOCLING_FORCE_OCR: Optional[bool] = None DOCLING_OCR_ENGINE: Optional[str] = None @@ -782,6 +808,11 @@ async def update_rag_config( if form_data.DOCLING_SERVER_URL is not None else request.app.state.config.DOCLING_SERVER_URL ) + request.app.state.config.DOCLING_PARAMS = ( + form_data.DOCLING_PARAMS + if form_data.DOCLING_PARAMS is not None + else request.app.state.config.DOCLING_PARAMS + ) request.app.state.config.DOCLING_DO_OCR = ( form_data.DOCLING_DO_OCR if form_data.DOCLING_DO_OCR is not None @@ -1104,6 +1135,7 @@ async def update_rag_config( "EXTERNAL_DOCUMENT_LOADER_API_KEY": request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY, "TIKA_SERVER_URL": request.app.state.config.TIKA_SERVER_URL, "DOCLING_SERVER_URL": request.app.state.config.DOCLING_SERVER_URL, + "DOCLING_PARAMS": request.app.state.config.DOCLING_PARAMS, "DOCLING_DO_OCR": request.app.state.config.DOCLING_DO_OCR, "DOCLING_FORCE_OCR": request.app.state.config.DOCLING_FORCE_OCR, "DOCLING_OCR_ENGINE": request.app.state.config.DOCLING_OCR_ENGINE, @@ -1522,6 +1554,7 @@ def process_file( "picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE, "picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL, "picture_description_api": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_API, + **request.app.state.config.DOCLING_PARAMS, }, PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, @@ -1680,49 +1713,6 @@ def process_text( @router.post("/process/youtube") -def process_youtube_video( - request: Request, form_data: ProcessUrlForm, user=Depends(get_verified_user) -): - try: - collection_name = form_data.collection_name - if not 
collection_name: - collection_name = calculate_sha256_string(form_data.url)[:63] - - loader = YoutubeLoader( - form_data.url, - language=request.app.state.config.YOUTUBE_LOADER_LANGUAGE, - proxy_url=request.app.state.config.YOUTUBE_LOADER_PROXY_URL, - ) - - docs = loader.load() - content = " ".join([doc.page_content for doc in docs]) - log.debug(f"text_content: {content}") - - save_docs_to_vector_db( - request, docs, collection_name, overwrite=True, user=user - ) - - return { - "status": True, - "collection_name": collection_name, - "filename": form_data.url, - "file": { - "data": { - "content": content, - }, - "meta": { - "name": form_data.url, - }, - }, - } - except Exception as e: - log.exception(e) - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=ERROR_MESSAGES.DEFAULT(e), - ) - - @router.post("/process/web") def process_web( request: Request, form_data: ProcessUrlForm, user=Depends(get_verified_user) @@ -1732,19 +1722,16 @@ def process_web( if not collection_name: collection_name = calculate_sha256_string(form_data.url)[:63] - loader = get_web_loader( - form_data.url, - verify_ssl=request.app.state.config.ENABLE_WEB_LOADER_SSL_VERIFICATION, - requests_per_second=request.app.state.config.WEB_LOADER_CONCURRENT_REQUESTS, - ) - docs = loader.load() - content = " ".join([doc.page_content for doc in docs]) - + content, docs = get_content_from_url(request, form_data.url) log.debug(f"text_content: {content}") if not request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: save_docs_to_vector_db( - request, docs, collection_name, overwrite=True, user=user + request, + docs, + collection_name, + overwrite=True, + user=user, ) else: collection_name = None @@ -2047,7 +2034,7 @@ async def process_web_search( result_items = [] try: - logging.info( + logging.debug( f"trying to web search with {request.app.state.config.WEB_SEARCH_ENGINE, form_data.queries}" ) @@ -2081,6 +2068,12 @@ async def process_web_search( 
detail=ERROR_MESSAGES.WEB_SEARCH_ERROR(e), ) + if len(urls) == 0: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=ERROR_MESSAGES.DEFAULT("No results found from web search"), + ) + try: if request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER: search_results = [ diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py index e481571df4..47b2c57961 100644 --- a/backend/open_webui/socket/main.py +++ b/backend/open_webui/socket/main.py @@ -356,7 +356,7 @@ async def join_note(sid, data): await sio.enter_room(sid, f"note:{note.id}") -@sio.on("channel-events") +@sio.on("events:channel") async def channel_events(sid, data): room = f"channel:{data['channel_id']}" participants = sio.manager.get_participants( @@ -373,7 +373,7 @@ async def channel_events(sid, data): if event_type == "typing": await sio.emit( - "channel-events", + "events:channel", { "channel_id": data["channel_id"], "message_id": data.get("message_id", None), @@ -653,12 +653,15 @@ def get_event_emitter(request_info, update_db=True): ) ) + chat_id = request_info.get("chat_id", None) + message_id = request_info.get("message_id", None) + emit_tasks = [ sio.emit( - "chat-events", + "events", { - "chat_id": request_info.get("chat_id", None), - "message_id": request_info.get("message_id", None), + "chat_id": chat_id, + "message_id": message_id, "data": event_data, }, to=session_id, @@ -667,8 +670,11 @@ def get_event_emitter(request_info, update_db=True): ] await asyncio.gather(*emit_tasks) - - if update_db: + if ( + update_db + and message_id + and not request_info.get("chat_id", "").startswith("local:") + ): if "type" in event_data and event_data["type"] == "status": Chats.add_message_status_to_chat_by_id_and_message_id( request_info["chat_id"], @@ -764,7 +770,7 @@ def get_event_emitter(request_info, update_db=True): def get_event_call(request_info): async def __event_caller__(event_data): response = await sio.call( - "chat-events", + "events", { "chat_id": 
request_info.get("chat_id", None), "message_id": request_info.get("message_id", None), diff --git a/backend/open_webui/tasks.py b/backend/open_webui/tasks.py index a15e8ac146..3e31438281 100644 --- a/backend/open_webui/tasks.py +++ b/backend/open_webui/tasks.py @@ -164,7 +164,10 @@ async def stop_task(redis, task_id: str): # Task successfully canceled return {"status": True, "message": f"Task {task_id} successfully stopped."} - return {"status": False, "message": f"Failed to stop task {task_id}."} + if task.cancelled() or task.done(): + return {"status": True, "message": f"Task {task_id} successfully cancelled."} + + return {"status": True, "message": f"Cancellation requested for {task_id}."} async def stop_item_tasks(redis: Redis, item_id: str): diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index f941ef9263..e34803ade1 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -6,7 +6,7 @@ import hmac import hashlib import requests import os - +import bcrypt from cryptography.hazmat.primitives.ciphers.aead import AESGCM from cryptography.hazmat.primitives.asymmetric import ed25519 @@ -38,11 +38,8 @@ from open_webui.env import ( from fastapi import BackgroundTasks, Depends, HTTPException, Request, Response, status from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer -from passlib.context import CryptContext -logging.getLogger("passlib").setLevel(logging.ERROR) - log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["OAUTH"]) @@ -155,19 +152,25 @@ def get_license_data(app, key): bearer_security = HTTPBearer(auto_error=False) -pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") -def verify_password(plain_password, hashed_password): +def get_password_hash(password: str) -> str: + """Hash a password using bcrypt""" + return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8") + + +def verify_password(plain_password: str, hashed_password: str) -> 
bool: + """Verify a password against its hash""" return ( - pwd_context.verify(plain_password, hashed_password) if hashed_password else None + bcrypt.checkpw( + plain_password.encode("utf-8"), + hashed_password.encode("utf-8"), + ) + if hashed_password + else None ) -def get_password_hash(password): - return pwd_context.hash(password) - - def create_token(data: dict, expires_delta: Union[timedelta, None] = None) -> str: payload = data.copy() diff --git a/backend/open_webui/utils/chat.py b/backend/open_webui/utils/chat.py index 83483f391b..8b6a0b9da2 100644 --- a/backend/open_webui/utils/chat.py +++ b/backend/open_webui/utils/chat.py @@ -80,6 +80,7 @@ async def generate_direct_chat_completion( event_caller = get_event_call(metadata) channel = f"{user_id}:{session_id}:{request_id}" + logging.info(f"WebSocket channel: {channel}") if form_data.get("stream"): q = asyncio.Queue() @@ -121,7 +122,10 @@ async def generate_direct_chat_completion( yield f"data: {json.dumps(data)}\n\n" elif isinstance(data, str): - yield data + if "data:" in data: + yield f"{data}\n\n" + else: + yield f"data: {data}\n\n" except Exception as e: log.debug(f"Error in event generator: {e}") pass diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index e4bf1195ff..bbfdd6a368 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -40,7 +40,10 @@ from open_webui.routers.tasks import ( generate_image_prompt, generate_chat_tags, ) -from open_webui.routers.retrieval import process_web_search, SearchForm +from open_webui.routers.retrieval import ( + process_web_search, + SearchForm, +) from open_webui.routers.images import ( load_b64_image_data, image_generations, @@ -76,14 +79,17 @@ from open_webui.utils.task import ( ) from open_webui.utils.misc import ( deep_update, + extract_urls, get_message_list, add_or_update_system_message, add_or_update_user_message, get_last_user_message, + get_last_user_message_item, 
get_last_assistant_message, get_system_message, prepend_to_first_user_message_content, convert_logit_bias_input_to_json, + get_content_from_message, ) from open_webui.utils.tools import get_tools from open_webui.utils.plugin import load_function_module_by_id @@ -147,7 +153,7 @@ def process_tool_result( if isinstance(tool_result, HTMLResponse): content_disposition = tool_result.headers.get("Content-Disposition", "") if "inline" in content_disposition: - content = tool_result.body.decode("utf-8") + content = tool_result.body.decode("utf-8", "replace") tool_result_embeds.append(content) if 200 <= tool_result.status_code < 300: @@ -175,7 +181,7 @@ def process_tool_result( "message": f"{tool_function_name}: Unexpected status code {tool_result.status_code} from embedded UI result.", } else: - tool_result = tool_result.body.decode("utf-8") + tool_result = tool_result.body.decode("utf-8", "replace") elif (tool_type == "external" and isinstance(tool_result, tuple)) or ( direct_tool and isinstance(tool_result, list) and len(tool_result) == 2 @@ -283,7 +289,7 @@ async def chat_completion_tools_handler( content = None if hasattr(response, "body_iterator"): async for chunk in response.body_iterator: - data = json.loads(chunk.decode("utf-8")) + data = json.loads(chunk.decode("utf-8", "replace")) content = data["choices"][0]["message"]["content"] # Cleanup any remaining background tasks if necessary @@ -298,7 +304,7 @@ async def chat_completion_tools_handler( recent_messages = messages[-4:] if len(messages) > 4 else messages chat_history = "\n".join( - f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\"" + f"{message['role'].upper()}: \"\"\"{get_content_from_message(message)}\"\"\"" for message in recent_messages ) @@ -821,7 +827,11 @@ async def chat_completion_files_handler( if files := body.get("metadata", {}).get("files", None): # Check if all files are in full context mode - all_full_context = all(item.get("context") == "full" for item in files) + all_full_context = 
all( + item.get("context") == "full" + for item in files + if item.get("type") == "file" + ) queries = [] if not all_full_context: @@ -853,10 +863,6 @@ async def chat_completion_files_handler( except: pass - if len(queries) == 0: - queries = [get_last_user_message(body["messages"])] - - if not all_full_context: await __event_emitter__( { "type": "status", @@ -868,6 +874,9 @@ async def chat_completion_files_handler( } ) + if len(queries) == 0: + queries = [get_last_user_message(body["messages"])] + try: # Offload get_sources_from_items to a separate thread loop = asyncio.get_running_loop() @@ -906,7 +915,6 @@ async def chat_completion_files_handler( log.debug(f"rag_contexts:sources: {sources}") unique_ids = set() - for source in sources or []: if not source or len(source.keys()) == 0: continue @@ -925,7 +933,6 @@ async def chat_completion_files_handler( unique_ids.add(_id) sources_count = len(unique_ids) - await __event_emitter__( { "type": "status", @@ -999,11 +1006,11 @@ async def process_chat_payload(request, form_data, user, metadata, model): log.debug(f"form_data: {form_data}") system_message = get_system_message(form_data.get("messages", [])) - if system_message: + if system_message: # Chat Controls/User Settings try: form_data = apply_system_prompt_to_body( - system_message.get("content"), form_data, metadata, user - ) + system_message.get("content"), form_data, metadata, user, replace=True + ) # Required to handle system prompt variables except: pass @@ -1168,8 +1175,28 @@ async def process_chat_payload(request, form_data, user, metadata, model): tool_ids = form_data.pop("tool_ids", None) files = form_data.pop("files", None) - # Remove files duplicates + prompt = get_last_user_message(form_data["messages"]) + # TODO: re-enable URL extraction from prompt + # urls = [] + # if prompt and len(prompt or "") < 500 and (not files or len(files) == 0): + # urls = extract_urls(prompt) + if files: + if not files: + files = [] + + for file_item in files: + if 
file_item.get("type", "file") == "folder": + # Get folder files + folder_id = file_item.get("id", None) + if folder_id: + folder = Folders.get_folder_by_id_and_user_id(folder_id, user.id) + if folder and folder.data and "files" in folder.data: + files = [f for f in files if f.get("id", None) != folder_id] + files = [*files, *folder.data["files"]] + + # files = [*files, *[{"type": "url", "url": url, "name": url} for url in urls]] + # Remove duplicate files based on their content files = list({json.dumps(f, sort_keys=True): f for f in files}.values()) metadata = { @@ -1261,9 +1288,6 @@ async def process_chat_payload(request, form_data, user, metadata, model): def make_tool_function(client, function_name): async def tool_function(**kwargs): - print(kwargs) - print(client) - print(await client.list_tool_specs()) return await client.call_tool( function_name, function_args=kwargs, @@ -1370,8 +1394,6 @@ async def process_chat_payload(request, form_data, user, metadata, model): ) context_string = context_string.strip() - - prompt = get_last_user_message(form_data["messages"]) if prompt is None: raise Exception("No user message found") @@ -1410,10 +1432,6 @@ async def process_chat_payload(request, form_data, user, metadata, model): } ) - print("Final form_data:", form_data) - print("Final metadata:", metadata) - print("Final events:", events) - return form_data, metadata, events @@ -1421,10 +1439,13 @@ async def process_chat_response( request, response, form_data, user, metadata, model, events, tasks ): async def background_tasks_handler(): - messages_map = Chats.get_messages_map_by_chat_id(metadata["chat_id"]) - message = messages_map.get(metadata["message_id"]) if messages_map else None + message = None + messages = [] + + if "chat_id" in metadata and not metadata["chat_id"].startswith("local:"): + messages_map = Chats.get_messages_map_by_chat_id(metadata["chat_id"]) + message = messages_map.get(metadata["message_id"]) if messages_map else None - if message: message_list 
= get_message_list(messages_map, metadata["message_id"]) # Remove details tags and files from the messages. @@ -1457,7 +1478,14 @@ async def process_chat_response( "content": content, } ) + else: + # Local temp chat, get the model and message from the form_data + message = get_last_user_message_item(form_data.get("messages", [])) + messages = form_data.get("messages", []) + if message: + message["model"] = form_data.get("model") + if message and "model" in message: if tasks and messages: if ( TASKS.FOLLOW_UP_GENERATION in tasks @@ -1476,10 +1504,12 @@ async def process_chat_response( if res and isinstance(res, dict): if len(res.get("choices", [])) == 1: - follow_ups_string = ( - res.get("choices", [])[0] - .get("message", {}) - .get("content", "") + response_message = res.get("choices", [])[0].get( + "message", {} + ) + + follow_ups_string = response_message.get( + "content", response_message.get("reasoning_content", "") ) else: follow_ups_string = "" @@ -1493,15 +1523,6 @@ async def process_chat_response( follow_ups = json.loads(follow_ups_string).get( "follow_ups", [] ) - - Chats.upsert_message_to_chat_by_id_and_message_id( - metadata["chat_id"], - metadata["message_id"], - { - "followUps": follow_ups, - }, - ) - await event_emitter( { "type": "chat:message:follow_ups", @@ -1510,17 +1531,96 @@ async def process_chat_response( }, } ) + + if not metadata.get("chat_id", "").startswith("local:"): + Chats.upsert_message_to_chat_by_id_and_message_id( + metadata["chat_id"], + metadata["message_id"], + { + "followUps": follow_ups, + }, + ) + except Exception as e: pass - if TASKS.TITLE_GENERATION in tasks: - user_message = get_last_user_message(messages) - if user_message and len(user_message) > 100: - user_message = user_message[:100] + "..." 
+ if not metadata.get("chat_id", "").startswith( + "local:" + ): # Only update titles and tags for non-temp chats + if ( + TASKS.TITLE_GENERATION in tasks + and tasks[TASKS.TITLE_GENERATION] + ): + user_message = get_last_user_message(messages) + if user_message and len(user_message) > 100: + user_message = user_message[:100] + "..." - if tasks[TASKS.TITLE_GENERATION]: + if tasks[TASKS.TITLE_GENERATION]: - res = await generate_title( + res = await generate_title( + request, + { + "model": message["model"], + "messages": messages, + "chat_id": metadata["chat_id"], + }, + user, + ) + + if res and isinstance(res, dict): + if len(res.get("choices", [])) == 1: + response_message = res.get("choices", [])[0].get( + "message", {} + ) + + title_string = response_message.get( + "content", + response_message.get( + "reasoning_content", + message.get("content", user_message), + ), + ) + else: + title_string = "" + + title_string = title_string[ + title_string.find("{") : title_string.rfind("}") + 1 + ] + + try: + title = json.loads(title_string).get( + "title", user_message + ) + except Exception as e: + title = "" + + if not title: + title = messages[0].get("content", user_message) + + Chats.update_chat_title_by_id( + metadata["chat_id"], title + ) + + await event_emitter( + { + "type": "chat:title", + "data": title, + } + ) + elif len(messages) == 2: + title = messages[0].get("content", user_message) + + Chats.update_chat_title_by_id(metadata["chat_id"], title) + + await event_emitter( + { + "type": "chat:title", + "data": message.get("content", user_message), + } + ) + + if TASKS.TAGS_GENERATION in tasks and tasks[TASKS.TAGS_GENERATION]: + res = await generate_chat_tags( request, { "model": message["model"], @@ -1532,89 +1632,35 @@ async def process_chat_response( if res and isinstance(res, dict): if len(res.get("choices", [])) == 1: - title_string = ( - res.get("choices", [])[0] - .get("message", {}) - .get( - "content", message.get("content", user_message) - ) + 
response_message = res.get("choices", [])[0].get( + "message", {} + ) + + tags_string = response_message.get( + "content", + response_message.get("reasoning_content", ""), ) else: - title_string = "" + tags_string = "" - title_string = title_string[ - title_string.find("{") : title_string.rfind("}") + 1 + tags_string = tags_string[ + tags_string.find("{") : tags_string.rfind("}") + 1 ] try: - title = json.loads(title_string).get( - "title", user_message + tags = json.loads(tags_string).get("tags", []) + Chats.update_chat_tags_by_id( + metadata["chat_id"], tags, user + ) + + await event_emitter( + { + "type": "chat:tags", + "data": tags, + } ) except Exception as e: - title = "" - - if not title: - title = messages[0].get("content", user_message) - - Chats.update_chat_title_by_id(metadata["chat_id"], title) - - await event_emitter( - { - "type": "chat:title", - "data": title, - } - ) - elif len(messages) == 2: - title = messages[0].get("content", user_message) - - Chats.update_chat_title_by_id(metadata["chat_id"], title) - - await event_emitter( - { - "type": "chat:title", - "data": message.get("content", user_message), - } - ) - - if TASKS.TAGS_GENERATION in tasks and tasks[TASKS.TAGS_GENERATION]: - res = await generate_chat_tags( - request, - { - "model": message["model"], - "messages": messages, - "chat_id": metadata["chat_id"], - }, - user, - ) - - if res and isinstance(res, dict): - if len(res.get("choices", [])) == 1: - tags_string = ( - res.get("choices", [])[0] - .get("message", {}) - .get("content", "") - ) - else: - tags_string = "" - - tags_string = tags_string[ - tags_string.find("{") : tags_string.rfind("}") + 1 - ] - - try: - tags = json.loads(tags_string).get("tags", []) - Chats.update_chat_tags_by_id( - metadata["chat_id"], tags, user - ) - - await event_emitter( - { - "type": "chat:tags", - "data": tags, - } - ) - except Exception as e: - pass + pass event_emitter = None event_caller = None @@ -1642,7 +1688,9 @@ async def process_chat_response( 
response.body, bytes ): try: - response_data = json.loads(response.body.decode("utf-8")) + response_data = json.loads( + response.body.decode("utf-8", "replace") + ) except json.JSONDecodeError: response_data = { "error": {"detail": "Invalid JSON response"} @@ -2276,7 +2324,11 @@ async def process_chat_response( last_delta_data = None async for line in response.body_iterator: - line = line.decode("utf-8") if isinstance(line, bytes) else line + line = ( + line.decode("utf-8", "replace") + if isinstance(line, bytes) + else line + ) data = line # Skip empty lines diff --git a/backend/open_webui/utils/misc.py b/backend/open_webui/utils/misc.py index 81a4142ea0..9984e378fb 100644 --- a/backend/open_webui/utils/misc.py +++ b/backend/open_webui/utils/misc.py @@ -136,6 +136,14 @@ def update_message_content(message: dict, content: str, append: bool = True) -> return message +def replace_system_message_content(content: str, messages: list[dict]) -> dict: + for message in messages: + if message["role"] == "system": + message["content"] = content + break + return messages + + def add_or_update_system_message( content: str, messages: list[dict], append: bool = False ): @@ -523,3 +531,11 @@ def throttle(interval: float = 10.0): return wrapper return decorator + + +def extract_urls(text: str) -> list[str]: + # Regex pattern to match URLs + url_pattern = re.compile( + r"(https?://[^\s]+)", re.IGNORECASE + ) # Matches http and https URLs + return url_pattern.findall(text) diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 6cf91e3f12..e0bf7582c6 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -62,6 +62,7 @@ from open_webui.env import ( WEBUI_AUTH_COOKIE_SAME_SITE, WEBUI_AUTH_COOKIE_SECURE, ENABLE_OAUTH_ID_TOKEN_COOKIE, + ENABLE_OAUTH_EMAIL_FALLBACK, OAUTH_CLIENT_INFO_ENCRYPTION_KEY, ) from open_webui.utils.misc import parse_duration @@ -82,6 +83,8 @@ class OAuthClientInformationFull(OAuthClientMetadata): 
client_id_issued_at: int | None = None client_secret_expires_at: int | None = None + server_metadata: Optional[OAuthMetadata] = None # Fetched from the OAuth server + from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL @@ -296,6 +299,7 @@ async def get_oauth_client_info_with_dynamic_client_registration( { **registration_response_json, **{"issuer": oauth_server_metadata_url}, + **{"server_metadata": oauth_server_metadata}, } ) log.info( @@ -331,20 +335,34 @@ class OAuthClientManager: self.clients = {} def add_client(self, client_id, oauth_client_info: OAuthClientInformationFull): - self.clients[client_id] = { - "client": self.oauth.register( - name=client_id, - client_id=oauth_client_info.client_id, - client_secret=oauth_client_info.client_secret, - client_kwargs=( - {"scope": oauth_client_info.scope} - if oauth_client_info.scope - else {} - ), - server_metadata_url=( - oauth_client_info.issuer if oauth_client_info.issuer else None - ), + kwargs = { + "name": client_id, + "client_id": oauth_client_info.client_id, + "client_secret": oauth_client_info.client_secret, + "client_kwargs": ( + {"scope": oauth_client_info.scope} if oauth_client_info.scope else {} ), + "server_metadata_url": ( + oauth_client_info.issuer if oauth_client_info.issuer else None + ), + } + + if ( + oauth_client_info.server_metadata + and oauth_client_info.server_metadata.code_challenge_methods_supported + ): + if ( + isinstance( + oauth_client_info.server_metadata.code_challenge_methods_supported, + list, + ) + and "S256" + in oauth_client_info.server_metadata.code_challenge_methods_supported + ): + kwargs["code_challenge_method"] = "S256" + + self.clients[client_id] = { + "client": self.oauth.register(**kwargs), "client_info": oauth_client_info, } return self.clients[client_id] @@ -367,8 +385,8 @@ class OAuthClientManager: if client_id in self.clients: client = self.clients[client_id] return ( - client.server_metadata_url - if hasattr(client, "server_metadata_url") + 
client._server_metadata_url + if hasattr(client, "_server_metadata_url") else None ) return None @@ -560,7 +578,17 @@ class OAuthClientManager: error_message = None try: - token = await client.authorize_access_token(request) + client_info = self.get_client_info(client_id) + token_params = {} + if ( + client_info + and hasattr(client_info, "client_id") + and hasattr(client_info, "client_secret") + ): + token_params["client_id"] = client_info.client_id + token_params["client_secret"] = client_info.client_secret + + token = await client.authorize_access_token(request, **token_params) if token: try: # Add timestamp for tracking @@ -615,8 +643,14 @@ class OAuthManager: self.app = app self._clients = {} - for _, provider_config in OAUTH_PROVIDERS.items(): - provider_config["register"](self.oauth) + + for name, provider_config in OAUTH_PROVIDERS.items(): + if "register" not in provider_config: + log.error(f"OAuth provider {name} missing register function") + continue + + client = provider_config["register"](self.oauth) + self._clients[name] = client def get_client(self, provider_name): if provider_name not in self._clients: @@ -627,8 +661,8 @@ class OAuthManager: if provider_name in self._clients: client = self._clients[provider_name] return ( - client.server_metadata_url - if hasattr(client, "server_metadata_url") + client._server_metadata_url + if hasattr(client, "_server_metadata_url") else None ) return None @@ -1147,6 +1181,8 @@ class OAuthManager: except Exception as e: log.warning(f"Error fetching GitHub email: {e}") raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED) + elif ENABLE_OAUTH_EMAIL_FALLBACK: + email = f"{provider_sub}.local" else: log.warning(f"OAuth callback failed, email is missing: {user_data}") raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED) diff --git a/backend/open_webui/utils/payload.py b/backend/open_webui/utils/payload.py index 8cb36b3759..4a431dcab3 100644 --- a/backend/open_webui/utils/payload.py +++ 
b/backend/open_webui/utils/payload.py @@ -2,6 +2,7 @@ from open_webui.utils.task import prompt_template, prompt_variables_template from open_webui.utils.misc import ( deep_update, add_or_update_system_message, + replace_system_message_content, ) from typing import Callable, Optional @@ -10,7 +11,11 @@ import json # inplace function: form_data is modified def apply_system_prompt_to_body( - system: Optional[str], form_data: dict, metadata: Optional[dict] = None, user=None + system: Optional[str], + form_data: dict, + metadata: Optional[dict] = None, + user=None, + replace: bool = False, ) -> dict: if not system: return form_data @@ -24,9 +29,15 @@ def apply_system_prompt_to_body( # Legacy (API Usage) system = prompt_template(system, user) - form_data["messages"] = add_or_update_system_message( - system, form_data.get("messages", []) - ) + if replace: + form_data["messages"] = replace_system_message_content( + system, form_data.get("messages", []) + ) + else: + form_data["messages"] = add_or_update_system_message( + system, form_data.get("messages", []) + ) + return form_data diff --git a/backend/open_webui/utils/tools.py b/backend/open_webui/utils/tools.py index 5cd7377876..6a0a7346bb 100644 --- a/backend/open_webui/utils/tools.py +++ b/backend/open_webui/utils/tools.py @@ -748,10 +748,6 @@ async def execute_tool_server( if operation.get("requestBody", {}).get("content"): if params: body_params = params - else: - raise Exception( - f"Request body expected for operation '{name}' but none found." 
- ) async with aiohttp.ClientSession( trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT) diff --git a/backend/requirements.txt b/backend/requirements.txt index 27e0c24cb7..6b348bad89 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -1,14 +1,13 @@ -fastapi==0.115.7 -uvicorn[standard]==0.35.0 -pydantic==2.11.7 +fastapi==0.118.0 +uvicorn[standard]==0.37.0 +pydantic==2.11.9 python-multipart==0.0.20 itsdangerous==2.2.0 python-socketio==5.13.0 python-jose==3.4.0 -passlib[bcrypt]==1.7.4 cryptography -bcrypt==4.3.0 +bcrypt==5.0.0 argon2-cffi==25.1.0 PyJWT[crypto]==2.10.1 authlib==1.6.3 @@ -30,14 +29,6 @@ peewee-migrate==1.12.2 pycrdt==0.12.25 redis -pymongo - -psycopg2-binary==2.9.10 -pgvector==0.4.1 - -PyMySQL==1.1.1 -boto3==1.40.5 - APScheduler==3.10.4 RestrictedPython==8.0 @@ -57,25 +48,15 @@ langchain==0.3.27 langchain-community==0.3.29 fake-useragent==2.2.0 -chromadb==1.0.20 +chromadb==1.1.0 opensearch-py==2.8.0 -pymilvus==2.5.0 -qdrant-client==1.14.3 -playwright==1.49.1 # Caution: version must match docker-compose.playwright.yaml -elasticsearch==9.1.0 -pinecone==6.0.2 -oracledb==3.2.0 - -av==14.0.1 # Caution: Set due to FATAL FIPS SELFTEST FAILURE, see discussion https://github.com/open-webui/open-webui/discussions/15720 transformers sentence-transformers==5.1.1 accelerate pyarrow==20.0.0 # fix: pin pyarrow version to 20 for rpi compatibility #15897 einops==0.8.1 -colbert-ai==0.2.21 - ftfy==6.2.3 pypdf==6.0.0 fpdf2==2.8.2 @@ -84,7 +65,7 @@ docx2txt==0.8 python-pptx==1.0.2 unstructured==0.16.17 nltk==3.9.1 -Markdown==3.8.2 +Markdown==3.9 pypandoc==1.15 pandas==2.2.3 openpyxl==3.1.5 @@ -105,7 +86,7 @@ onnxruntime==1.20.1 faster-whisper==1.1.1 -black==25.1.0 +black==25.9.0 youtube-transcript-api==1.2.2 pytube==15.0.0 @@ -117,11 +98,6 @@ google-api-python-client google-auth-httplib2 google-auth-oauthlib -## Tests -docker~=7.1.0 -pytest~=8.4.1 -pytest-docker~=3.1.1 - googleapis-common-protos==1.70.0 
google-cloud-storage==2.19.0 @@ -129,24 +105,45 @@ azure-identity==1.25.0 azure-storage-blob==12.24.1 +pymongo +psycopg2-binary==2.9.10 +pgvector==0.4.1 + +PyMySQL==1.1.1 +boto3==1.40.5 + +pymilvus==2.6.2 +qdrant-client==1.14.3 +playwright==1.49.1 # Caution: version must match docker-compose.playwright.yaml +elasticsearch==9.1.0 +pinecone==6.0.2 +oracledb==3.2.0 + +av==14.0.1 # Caution: Set due to FATAL FIPS SELFTEST FAILURE, see discussion https://github.com/open-webui/open-webui/discussions/15720 + +colbert-ai==0.2.21 + + +## Tests +docker~=7.1.0 +pytest~=8.4.1 +pytest-docker~=3.1.1 + ## LDAP ldap3==2.9.1 ## Firecrawl firecrawl-py==1.12.0 -# Sougou API SDK(Tencentcloud SDK) -tencentcloud-sdk-python==3.0.1336 - ## Trace -opentelemetry-api==1.36.0 -opentelemetry-sdk==1.36.0 -opentelemetry-exporter-otlp==1.36.0 -opentelemetry-instrumentation==0.57b0 -opentelemetry-instrumentation-fastapi==0.57b0 -opentelemetry-instrumentation-sqlalchemy==0.57b0 -opentelemetry-instrumentation-redis==0.57b0 -opentelemetry-instrumentation-requests==0.57b0 -opentelemetry-instrumentation-logging==0.57b0 -opentelemetry-instrumentation-httpx==0.57b0 -opentelemetry-instrumentation-aiohttp-client==0.57b0 +opentelemetry-api==1.37.0 +opentelemetry-sdk==1.37.0 +opentelemetry-exporter-otlp==1.37.0 +opentelemetry-instrumentation==0.58b0 +opentelemetry-instrumentation-fastapi==0.58b0 +opentelemetry-instrumentation-sqlalchemy==0.58b0 +opentelemetry-instrumentation-redis==0.58b0 +opentelemetry-instrumentation-requests==0.58b0 +opentelemetry-instrumentation-logging==0.58b0 +opentelemetry-instrumentation-httpx==0.58b0 +opentelemetry-instrumentation-aiohttp-client==0.58b0 diff --git a/backend/start.sh b/backend/start.sh index c32498aa45..31e87c9557 100755 --- a/backend/start.sh +++ b/backend/start.sh @@ -70,5 +70,18 @@ if [ -n "$SPACE_ID" ]; then fi PYTHON_CMD=$(command -v python3 || command -v python) +UVICORN_WORKERS="${UVICORN_WORKERS:-1}" -WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec "$PYTHON_CMD" -m 
uvicorn open_webui.main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' --workers "${UVICORN_WORKERS:-1}" +# If script is called with arguments, use them; otherwise use default workers +if [ "$#" -gt 0 ]; then + ARGS=("$@") +else + ARGS=(--workers "$UVICORN_WORKERS") +fi + +# Run uvicorn +WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec "$PYTHON_CMD" -m uvicorn open_webui.main:app \ + --host "$HOST" \ + --port "$PORT" \ + --forwarded-allow-ips '*' \ + "${ARGS[@]}" \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 6b59776fa0..38cf347d3e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.6.32", + "version": "0.6.33", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.6.32", + "version": "0.6.33", "dependencies": { "@azure/msal-browser": "^4.5.0", "@codemirror/lang-javascript": "^6.2.2", @@ -93,6 +93,8 @@ "turndown-plugin-gfm": "^1.0.2", "undici": "^7.3.0", "uuid": "^9.0.1", + "vega": "^6.2.0", + "vega-lite": "^6.4.1", "vite-plugin-static-copy": "^2.2.0", "y-prosemirror": "^1.3.7", "yaml": "^2.7.1", @@ -5592,6 +5594,99 @@ "node": ">=8" } }, + "node_modules/cliui": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-9.0.1.tgz", + "integrity": "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==", + "license": "ISC", + "dependencies": { + "string-width": "^7.2.0", + "strip-ansi": "^7.1.0", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=20" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, 
+ "node_modules/cliui/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.5.0.tgz", + "integrity": "sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==", + "license": "MIT" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + 
"string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/clone": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz", @@ -6346,6 +6441,36 @@ "node": ">=12" } }, + "node_modules/d3-geo-projection": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/d3-geo-projection/-/d3-geo-projection-4.0.0.tgz", + "integrity": "sha512-p0bK60CEzph1iqmnxut7d/1kyTmm3UWtPlwdkM31AU+LW+BXazd5zJdoCn7VFxNCHXRngPHRnsNn5uGjLRGndg==", + "license": "ISC", + "dependencies": { + "commander": "7", + "d3-array": "1 - 3", + "d3-geo": "1.12.0 - 3" + }, + "bin": { + "geo2svg": "bin/geo2svg.js", + "geograticule": "bin/geograticule.js", + "geoproject": "bin/geoproject.js", + "geoquantize": "bin/geoquantize.js", + "geostitch": "bin/geostitch.js" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-geo-projection/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, "node_modules/d3-hierarchy": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", @@ -7038,6 +7163,15 @@ "@esbuild/win32-x64": "0.25.1" } }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -7750,6 +7884,27 @@ "resolved": 
"https://registry.npmjs.org/gc-hook/-/gc-hook-0.3.1.tgz", "integrity": "sha512-E5M+O/h2o7eZzGhzRZGex6hbB3k4NWqO0eA+OzLRLXxhdbYPajZnynPwAtphnh+cRHPwsj5Z80dqZlfI4eK55A==" }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", + "integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/get-func-name": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", @@ -8763,6 +8918,12 @@ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "node_modules/json-stringify-pretty-compact": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/json-stringify-pretty-compact/-/json-stringify-pretty-compact-4.0.0.tgz", + "integrity": "sha512-3CNZ2DnrpByG9Nqj6Xo8vqbjT4F6N+tb4Gb28ESAZjYZ5yqvmc56J+/kuIwkaAMOyblTQhUW7PxMkUb8Q36N3Q==", + "license": "MIT" + }, "node_modules/json-stringify-safe": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", @@ -12770,6 +12931,26 @@ "node": ">=10.13.0" } }, + "node_modules/topojson-client": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/topojson-client/-/topojson-client-3.1.0.tgz", + "integrity": "sha512-605uxS6bcYxGXw9qi62XyrV6Q3xwbndjachmNxu8HWTtVPxZfEJN9fd/SZS1Q54Sn2y0TMyMxFj/cJINqGHrKw==", + 
"license": "ISC", + "dependencies": { + "commander": "2" + }, + "bin": { + "topo2geo": "bin/topo2geo", + "topomerge": "bin/topomerge", + "topoquantize": "bin/topoquantize" + } + }, + "node_modules/topojson-client/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "license": "MIT" + }, "node_modules/totalist": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/totalist/-/totalist-3.0.1.tgz", @@ -13047,6 +13228,417 @@ "devOptional": true, "license": "MIT" }, + "node_modules/vega": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/vega/-/vega-6.2.0.tgz", + "integrity": "sha512-BIwalIcEGysJdQDjeVUmMWB3e50jPDNAMfLJscjEvpunU9bSt7X1OYnQxkg3uBwuRRI4nWfFZO9uIW910nLeGw==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-crossfilter": "~5.1.0", + "vega-dataflow": "~6.1.0", + "vega-encode": "~5.1.0", + "vega-event-selector": "~4.0.0", + "vega-expression": "~6.1.0", + "vega-force": "~5.1.0", + "vega-format": "~2.1.0", + "vega-functions": "~6.1.0", + "vega-geo": "~5.1.0", + "vega-hierarchy": "~5.1.0", + "vega-label": "~2.1.0", + "vega-loader": "~5.1.0", + "vega-parser": "~7.1.0", + "vega-projection": "~2.1.0", + "vega-regression": "~2.1.0", + "vega-runtime": "~7.1.0", + "vega-scale": "~8.1.0", + "vega-scenegraph": "~5.1.0", + "vega-statistics": "~2.0.0", + "vega-time": "~3.1.0", + "vega-transforms": "~5.1.0", + "vega-typings": "~2.1.0", + "vega-util": "~2.1.0", + "vega-view": "~6.1.0", + "vega-view-transforms": "~5.1.0", + "vega-voronoi": "~5.1.0", + "vega-wordcloud": "~5.1.0" + }, + "funding": { + "url": "https://app.hubspot.com/payments/GyPC972GD9Rt" + } + }, + "node_modules/vega-canvas": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/vega-canvas/-/vega-canvas-2.0.0.tgz", + "integrity": 
"sha512-9x+4TTw/USYST5nx4yN272sy9WcqSRjAR0tkQYZJ4cQIeon7uVsnohvoPQK1JZu7K1QXGUqzj08z0u/UegBVMA==", + "license": "BSD-3-Clause" + }, + "node_modules/vega-crossfilter": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-crossfilter/-/vega-crossfilter-5.1.0.tgz", + "integrity": "sha512-EmVhfP3p6AM7o/lPan/QAoqjblI19BxWUlvl2TSs0xjQd8KbaYYbS4Ixt3cmEvl0QjRdBMF6CdJJ/cy9DTS4Fw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "vega-dataflow": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-dataflow": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/vega-dataflow/-/vega-dataflow-6.1.0.tgz", + "integrity": "sha512-JxumGlODtFbzoQ4c/jQK8Tb/68ih0lrexlCozcMfTAwQ12XhTqCvlafh7MAKKTMBizjOfaQTHm4Jkyb1H5CfyQ==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-format": "^2.1.0", + "vega-loader": "^5.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-encode": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-encode/-/vega-encode-5.1.0.tgz", + "integrity": "sha512-q26oI7B+MBQYcTQcr5/c1AMsX3FvjZLQOBi7yI0vV+GEn93fElDgvhQiYrgeYSD4Exi/jBPeUXuN6p4bLz16kA==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-interpolate": "^3.0.1", + "vega-dataflow": "^6.1.0", + "vega-scale": "^8.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-event-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/vega-event-selector/-/vega-event-selector-4.0.0.tgz", + "integrity": "sha512-CcWF4m4KL/al1Oa5qSzZ5R776q8lRxCj3IafCHs5xipoEHrkgu1BWa7F/IH5HrDNXeIDnqOpSV1pFsAWRak4gQ==", + "license": "BSD-3-Clause" + }, + "node_modules/vega-expression": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/vega-expression/-/vega-expression-6.1.0.tgz", + "integrity": "sha512-hHgNx/fQ1Vn1u6vHSamH7lRMsOa/yQeHGGcWVmh8fZafLdwdhCM91kZD9p7+AleNpgwiwzfGogtpATFaMmDFYg==", + "license": "BSD-3-Clause", + "dependencies": { + "@types/estree": 
"^1.0.8", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-expression/node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "license": "MIT" + }, + "node_modules/vega-force": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-force/-/vega-force-5.1.0.tgz", + "integrity": "sha512-wdnchOSeXpF9Xx8Yp0s6Do9F7YkFeOn/E/nENtsI7NOcyHpICJ5+UkgjUo9QaQ/Yu+dIDU+sP/4NXsUtq6SMaQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-force": "^3.0.0", + "vega-dataflow": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-format": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-format/-/vega-format-2.1.0.tgz", + "integrity": "sha512-i9Ht33IgqG36+S1gFDpAiKvXCPz+q+1vDhDGKK8YsgMxGOG4PzinKakI66xd7SdV4q97FgpR7odAXqtDN2wKqw==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-format": "^3.1.0", + "d3-time-format": "^4.1.0", + "vega-time": "^3.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-functions": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/vega-functions/-/vega-functions-6.1.0.tgz", + "integrity": "sha512-yooEbWt0FWMBNoohwLsl25lEh08WsWabTXbbS+q0IXZzWSpX4Cyi45+q7IFyy/2L4oaIfGIIV14dgn3srQQcGA==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-color": "^3.1.0", + "d3-geo": "^3.1.1", + "vega-dataflow": "^6.1.0", + "vega-expression": "^6.1.0", + "vega-scale": "^8.1.0", + "vega-scenegraph": "^5.1.0", + "vega-selections": "^6.1.0", + "vega-statistics": "^2.0.0", + "vega-time": "^3.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-geo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-geo/-/vega-geo-5.1.0.tgz", + "integrity": 
"sha512-H8aBBHfthc3rzDbz/Th18+Nvp00J73q3uXGAPDQqizioDm/CoXCK8cX4pMePydBY9S6ikBiGJrLKFDa80wI20g==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-color": "^3.1.0", + "d3-geo": "^3.1.1", + "vega-canvas": "^2.0.0", + "vega-dataflow": "^6.1.0", + "vega-projection": "^2.1.0", + "vega-statistics": "^2.0.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-hierarchy": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-hierarchy/-/vega-hierarchy-5.1.0.tgz", + "integrity": "sha512-rZlU8QJNETlB6o73lGCPybZtw2fBBsRIRuFE77aCLFHdGsh6wIifhplVarqE9icBqjUHRRUOmcEYfzwVIPr65g==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-hierarchy": "^3.1.2", + "vega-dataflow": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-label": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-label/-/vega-label-2.1.0.tgz", + "integrity": "sha512-/hgf+zoA3FViDBehrQT42Lta3t8In6YwtMnwjYlh72zNn1p3c7E3YUBwqmAqTM1x+tudgzMRGLYig+bX1ewZxQ==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-canvas": "^2.0.0", + "vega-dataflow": "^6.1.0", + "vega-scenegraph": "^5.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-lite": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/vega-lite/-/vega-lite-6.4.1.tgz", + "integrity": "sha512-KO3ybHNouRK4A0al/+2fN9UqgTEfxrd/ntGLY933Hg5UOYotDVQdshR3zn7OfXwQ7uj0W96Vfa5R+QxO8am3IQ==", + "license": "BSD-3-Clause", + "dependencies": { + "json-stringify-pretty-compact": "~4.0.0", + "tslib": "~2.8.1", + "vega-event-selector": "~4.0.0", + "vega-expression": "~6.1.0", + "vega-util": "~2.1.0", + "yargs": "~18.0.0" + }, + "bin": { + "vl2pdf": "bin/vl2pdf", + "vl2png": "bin/vl2png", + "vl2svg": "bin/vl2svg", + "vl2vg": "bin/vl2vg" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://app.hubspot.com/payments/GyPC972GD9Rt" + }, + "peerDependencies": { + "vega": "^6.0.0" + } + }, + "node_modules/vega-loader": { + "version": "5.1.0", + 
"resolved": "https://registry.npmjs.org/vega-loader/-/vega-loader-5.1.0.tgz", + "integrity": "sha512-GaY3BdSPbPNdtrBz8SYUBNmNd8mdPc3mtdZfdkFazQ0RD9m+Toz5oR8fKnTamNSk9fRTJX0Lp3uEqxrAlQVreg==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-dsv": "^3.0.1", + "topojson-client": "^3.1.0", + "vega-format": "^2.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-parser": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/vega-parser/-/vega-parser-7.1.0.tgz", + "integrity": "sha512-g0lrYxtmYVW8G6yXpIS4J3Uxt9OUSkc0bLu5afoYDo4rZmoOOdll3x3ebActp5LHPW+usZIE+p5nukRS2vEc7Q==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-dataflow": "^6.1.0", + "vega-event-selector": "^4.0.0", + "vega-functions": "^6.1.0", + "vega-scale": "^8.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-projection": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-projection/-/vega-projection-2.1.0.tgz", + "integrity": "sha512-EjRjVSoMR5ibrU7q8LaOQKP327NcOAM1+eZ+NO4ANvvAutwmbNVTmfA1VpPH+AD0AlBYc39ND/wnRk7SieDiXA==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-geo": "^3.1.1", + "d3-geo-projection": "^4.0.0", + "vega-scale": "^8.1.0" + } + }, + "node_modules/vega-regression": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-regression/-/vega-regression-2.1.0.tgz", + "integrity": "sha512-HzC7MuoEwG1rIxRaNTqgcaYF03z/ZxYkQR2D5BN0N45kLnHY1HJXiEcZkcffTsqXdspLjn47yLi44UoCwF5fxQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "vega-dataflow": "^6.1.0", + "vega-statistics": "^2.0.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-runtime": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/vega-runtime/-/vega-runtime-7.1.0.tgz", + "integrity": "sha512-mItI+WHimyEcZlZrQ/zYR3LwHVeyHCWwp7MKaBjkU8EwkSxEEGVceyGUY9X2YuJLiOgkLz/6juYDbMv60pfwYA==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-dataflow": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + 
"node_modules/vega-scale": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/vega-scale/-/vega-scale-8.1.0.tgz", + "integrity": "sha512-VEgDuEcOec8+C8+FzLcnAmcXrv2gAJKqQifCdQhkgnsLa978vYUgVfCut/mBSMMHbH8wlUV1D0fKZTjRukA1+A==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-interpolate": "^3.0.1", + "d3-scale": "^4.0.2", + "d3-scale-chromatic": "^3.1.0", + "vega-time": "^3.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-scenegraph": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-scenegraph/-/vega-scenegraph-5.1.0.tgz", + "integrity": "sha512-4gA89CFIxkZX+4Nvl8SZF2MBOqnlj9J5zgdPh/HPx+JOwtzSlUqIhxFpFj7GWYfwzr/PyZnguBLPihPw1Og/cA==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-path": "^3.1.0", + "d3-shape": "^3.2.0", + "vega-canvas": "^2.0.0", + "vega-loader": "^5.1.0", + "vega-scale": "^8.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-selections": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/vega-selections/-/vega-selections-6.1.0.tgz", + "integrity": "sha512-WaHM7D7ghHceEfMsgFeaZnDToWL0mgCFtStVOobNh/OJLh0CL7yNKeKQBqRXJv2Lx74dPNf6nj08+52ytWfW7g==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "3.2.4", + "vega-expression": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-statistics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/vega-statistics/-/vega-statistics-2.0.0.tgz", + "integrity": "sha512-dGPfDXnBlgXbZF3oxtkb8JfeRXd5TYHx25Z/tIoaa9jWua4Vf/AoW2wwh8J1qmMy8J03/29aowkp1yk4DOPazQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4" + } + }, + "node_modules/vega-time": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/vega-time/-/vega-time-3.1.0.tgz", + "integrity": "sha512-G93mWzPwNa6UYQRkr8Ujur9uqxbBDjDT/WpXjbDY0yygdSkRT+zXF+Sb4gjhW0nPaqdiwkn0R6kZcSPMj1bMNA==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-time": 
"^3.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-transforms/-/vega-transforms-5.1.0.tgz", + "integrity": "sha512-mj/sO2tSuzzpiXX8JSl4DDlhEmVwM/46MTAzTNQUQzJPMI/n4ChCjr/SdEbfEyzlD4DPm1bjohZGjLc010yuMg==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "vega-dataflow": "^6.1.0", + "vega-statistics": "^2.0.0", + "vega-time": "^3.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-typings": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-typings/-/vega-typings-2.1.0.tgz", + "integrity": "sha512-zdis4Fg4gv37yEvTTSZEVMNhp8hwyEl7GZ4X4HHddRVRKxWFsbyKvZx/YW5Z9Ox4sjxVA2qHzEbod4Fdx+SEJA==", + "license": "BSD-3-Clause", + "dependencies": { + "@types/geojson": "7946.0.16", + "vega-event-selector": "^4.0.0", + "vega-expression": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-util": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/vega-util/-/vega-util-2.1.0.tgz", + "integrity": "sha512-PGfp0m0QCufDmcxKJCWQy4Ov23FoF8DSXmoJwSezi3itQaa2hbxK0+xwsTMP2vy4PR16Pu25HMzgMwXVW1+33w==", + "license": "BSD-3-Clause" + }, + "node_modules/vega-view": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/vega-view/-/vega-view-6.1.0.tgz", + "integrity": "sha512-hmHDm/zC65lb23mb9Tr9Gx0wkxP0TMS31LpMPYxIZpvInxvUn7TYitkOtz1elr63k2YZrgmF7ztdGyQ4iCQ5fQ==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-array": "^3.2.4", + "d3-timer": "^3.0.1", + "vega-dataflow": "^6.1.0", + "vega-format": "^2.1.0", + "vega-functions": "^6.1.0", + "vega-runtime": "^7.1.0", + "vega-scenegraph": "^5.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-view-transforms": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-view-transforms/-/vega-view-transforms-5.1.0.tgz", + "integrity": "sha512-fpigh/xn/32t+An1ShoY3MLeGzNdlbAp2+HvFKzPpmpMTZqJEWkk/J/wHU7Swyc28Ta7W1z3fO+8dZkOYO5TWQ==", + 
"license": "BSD-3-Clause", + "dependencies": { + "vega-dataflow": "^6.1.0", + "vega-scenegraph": "^5.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-voronoi": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-voronoi/-/vega-voronoi-5.1.0.tgz", + "integrity": "sha512-uKdsoR9x60mz7eYtVG+NhlkdQXeVdMr6jHNAHxs+W+i6kawkUp5S9jp1xf1FmW/uZvtO1eqinHQNwATcDRsiUg==", + "license": "BSD-3-Clause", + "dependencies": { + "d3-delaunay": "^6.0.4", + "vega-dataflow": "^6.1.0", + "vega-util": "^2.1.0" + } + }, + "node_modules/vega-wordcloud": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/vega-wordcloud/-/vega-wordcloud-5.1.0.tgz", + "integrity": "sha512-sSdNmT8y2D7xXhM2h76dKyaYn3PA4eV49WUUkfYfqHz/vpcu10GSAoFxLhQQTkbZXR+q5ZB63tFUow9W2IFo6g==", + "license": "BSD-3-Clause", + "dependencies": { + "vega-canvas": "^2.0.0", + "vega-dataflow": "^6.1.0", + "vega-scale": "^8.1.0", + "vega-statistics": "^2.0.0", + "vega-util": "^2.1.0" + } + }, "node_modules/verror": { "version": "1.10.0", "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", @@ -14209,6 +14801,15 @@ "yjs": "^13.0.0" } }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, "node_modules/yallist": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", @@ -14230,6 +14831,82 @@ "node": ">= 14" } }, + "node_modules/yargs": { + "version": "18.0.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz", + "integrity": "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==", + "license": "MIT", + "dependencies": { + "cliui": "^9.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "string-width": "^7.2.0", + "y18n": "^5.0.5", + 
"yargs-parser": "^22.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=23" + } + }, + "node_modules/yargs-parser": { + "version": "22.0.0", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-22.0.0.tgz", + "integrity": "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==", + "license": "ISC", + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=23" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.5.0.tgz", + "integrity": "sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==", + "license": "MIT" + }, + "node_modules/yargs/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + 
"node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", diff --git a/package.json b/package.json index 658964de0b..bfb7b0d871 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.6.32", + "version": "0.6.33", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", @@ -137,6 +137,8 @@ "turndown-plugin-gfm": "^1.0.2", "undici": "^7.3.0", "uuid": "^9.0.1", + "vega": "^6.2.0", + "vega-lite": "^6.4.1", "vite-plugin-static-copy": "^2.2.0", "y-prosemirror": "^1.3.7", "yaml": "^2.7.1", diff --git a/pyproject.toml b/pyproject.toml index 7378d3d287..4452fff89a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,17 +6,16 @@ authors = [ ] license = { file = "LICENSE" } dependencies = [ - "fastapi==0.115.7", - "uvicorn[standard]==0.35.0", - "pydantic==2.11.7", + "fastapi==0.118.0", + "uvicorn[standard]==0.37.0", + "pydantic==2.11.9", "python-multipart==0.0.20", "itsdangerous==2.2.0", "python-socketio==5.13.0", "python-jose==3.4.0", - "passlib[bcrypt]==1.7.4", "cryptography", - "bcrypt==4.3.0", + "bcrypt==5.0.0", "argon2-cffi==25.1.0", "PyJWT[crypto]==2.10.1", "authlib==1.6.3", @@ -76,7 +75,7 @@ dependencies = [ "python-pptx==1.0.2", "unstructured==0.16.17", "nltk==3.9.1", - "Markdown==3.8.2", + "Markdown==3.9", "pypandoc==1.15", "pandas==2.2.3", "openpyxl==3.1.5", @@ -96,8 +95,8 @@ dependencies = [ "onnxruntime==1.20.1", "faster-whisper==1.1.1", - "black==25.1.0", - "youtube-transcript-api==1.1.0", + "black==25.9.0", + "youtube-transcript-api==1.2.2", "pytube==15.0.0", "pydub", @@ -107,8 +106,6 @@ dependencies = [ "google-auth-httplib2", "google-auth-oauthlib", - - "googleapis-common-protos==1.70.0", "google-cloud-storage==2.19.0", @@ -116,12 +113,6 @@ dependencies = [ "azure-storage-blob==12.24.1", "ldap3==2.9.1", - - "firecrawl-py==1.12.0", - 
"tencentcloud-sdk-python==3.0.1336", - - "oracledb>=3.2.0", - ] readme = "README.md" requires-python = ">= 3.11, < 3.13.0a1" @@ -155,11 +146,14 @@ all = [ "elasticsearch==9.1.0", "qdrant-client==1.14.3", - "pymilvus==2.5.0", + "pymilvus==2.6.2", "pinecone==6.0.2", "oracledb==3.2.0", "colbert-ai==0.2.21", + + "firecrawl-py==1.12.0", + "tencentcloud-sdk-python==3.0.1336", ] [project.scripts] diff --git a/src/lib/apis/functions/index.ts b/src/lib/apis/functions/index.ts index 60e88118b8..47346b4a20 100644 --- a/src/lib/apis/functions/index.ts +++ b/src/lib/apis/functions/index.ts @@ -62,6 +62,37 @@ export const getFunctions = async (token: string = '') => { return res; }; +export const getFunctionList = async (token: string = '') => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/functions/list`, { + method: 'GET', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + authorization: `Bearer ${token}` + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .then((json) => { + return json; + }) + .catch((err) => { + error = err.detail; + console.error(err); + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + export const loadFunctionByUrl = async (token: string = '', url: string) => { let error = null; diff --git a/src/lib/components/AddConnectionModal.svelte b/src/lib/components/AddConnectionModal.svelte index 240df839a8..784b3c0453 100644 --- a/src/lib/components/AddConnectionModal.svelte +++ b/src/lib/components/AddConnectionModal.svelte @@ -17,6 +17,7 @@ import Tags from './common/Tags.svelte'; import Spinner from '$lib/components/common/Spinner.svelte'; import XMark from '$lib/components/icons/XMark.svelte'; + import Textarea from './common/Textarea.svelte'; export let onSubmit: Function = () => {}; export let onDelete: Function = () => {}; @@ -42,6 +43,8 @@ let enable = true; let apiVersion = ''; + let headers = ''; + let tags = []; let 
modelId = ''; @@ -69,6 +72,19 @@ // remove trailing slash from url url = url.replace(/\/$/, ''); + if (headers) { + try { + const _headers = JSON.parse(headers); + if (typeof _headers !== 'object' || Array.isArray(_headers)) { + throw new Error('Headers must be a valid JSON object'); + } + headers = JSON.stringify(_headers, null, 2); + } catch (error) { + toast.error($i18n.t('Headers must be a valid JSON object')); + return; + } + } + const res = await verifyOpenAIConnection( localStorage.token, { @@ -77,7 +93,8 @@ config: { auth_type, azure: azure, - api_version: apiVersion + api_version: apiVersion, + headers: JSON.parse(headers) } }, direct @@ -136,6 +153,19 @@ } } + if (headers) { + try { + const _headers = JSON.parse(headers); + if (typeof _headers !== 'object' || Array.isArray(_headers)) { + throw new Error('Headers must be a valid JSON object'); + } + headers = JSON.stringify(_headers, null, 2); + } catch (error) { + toast.error($i18n.t('Headers must be a valid JSON object')); + return; + } + } + // remove trailing slash from url url = url.replace(/\/$/, ''); @@ -149,6 +179,7 @@ model_ids: modelIds, connection_type: connectionType, auth_type, + headers: headers ? JSON.parse(headers) : undefined, ...(!ollama && azure ? { azure: true, api_version: apiVersion } : {}) } }; @@ -172,6 +203,9 @@ key = connection.key; auth_type = connection.config.auth_type ?? 'bearer'; + headers = connection.config?.headers + ? JSON.stringify(connection.config.headers, null, 2) + : ''; enable = connection.config?.enable ?? true; tags = connection.config?.tags ?? []; @@ -376,6 +410,35 @@ + {#if !ollama && !direct} +
+
+ + +
+ +