diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 7f603cb10c..fa82ae26a1 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -73,4 +73,4 @@
### Contributor License Agreement
-By submitting this pull request, I confirm that I have read and fully agree to the [Contributor License Agreement (CLA)](/CONTRIBUTOR_LICENSE_AGREEMENT), and I am providing my contributions under its terms.
+By submitting this pull request, I confirm that I have read and fully agree to the [Contributor License Agreement (CLA)](https://github.com/open-webui/open-webui/blob/main/CONTRIBUTOR_LICENSE_AGREEMENT), and I am providing my contributions under its terms.
diff --git a/.github/workflows/build-release.yml b/.github/workflows/build-release.yml
index 7d5e30e23e..019fbb6bae 100644
--- a/.github/workflows/build-release.yml
+++ b/.github/workflows/build-release.yml
@@ -36,7 +36,7 @@ jobs:
echo "::set-output name=content::$CHANGELOG_ESCAPED"
- name: Create GitHub release
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -61,7 +61,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Trigger Docker build workflow
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
script: |
github.rest.actions.createWorkflowDispatch({
diff --git a/.github/workflows/format-backend.yaml b/.github/workflows/format-backend.yaml
index 56074a84f4..562e6aa1c1 100644
--- a/.github/workflows/format-backend.yaml
+++ b/.github/workflows/format-backend.yaml
@@ -33,7 +33,7 @@ jobs:
- uses: actions/checkout@v5
- name: Set up Python
- uses: actions/setup-python@v5
+ uses: actions/setup-python@v6
with:
python-version: '${{ matrix.python-version }}'
diff --git a/.github/workflows/format-build-frontend.yaml b/.github/workflows/format-build-frontend.yaml
index df961ca3f5..eaa1072fbc 100644
--- a/.github/workflows/format-build-frontend.yaml
+++ b/.github/workflows/format-build-frontend.yaml
@@ -27,7 +27,7 @@ jobs:
uses: actions/checkout@v5
- name: Setup Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v5
with:
node-version: '22'
@@ -54,7 +54,7 @@ jobs:
uses: actions/checkout@v5
- name: Setup Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v5
with:
node-version: '22'
diff --git a/.github/workflows/release-pypi.yml b/.github/workflows/release-pypi.yml
index c4ae97422d..9995ccedae 100644
--- a/.github/workflows/release-pypi.yml
+++ b/.github/workflows/release-pypi.yml
@@ -21,10 +21,10 @@ jobs:
fetch-depth: 0
- name: Install Git
run: sudo apt-get update && sudo apt-get install -y git
- - uses: actions/setup-node@v4
+ - uses: actions/setup-node@v5
with:
node-version: 22
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: 3.11
- name: Build
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2af109cb38..17ed1a98e9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,146 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [0.6.31] - 2025-09-25
+
+### Added
+
+- 🔌 MCP (streamable HTTP) server support was added alongside existing OpenAPI server integration, allowing users to connect both server types through an improved server configuration interface. [#15932](https://github.com/open-webui/open-webui/issues/15932) [#16651](https://github.com/open-webui/open-webui/pull/16651), [Commit](https://github.com/open-webui/open-webui/commit/fd7385c3921eb59af76a26f4c475aedb38ce2406), [Commit](https://github.com/open-webui/open-webui/commit/777e81f7a8aca957a359d51df8388e5af4721a68), [Commit](https://github.com/open-webui/open-webui/commit/de7f7b3d855641450f8e5aac34fbae0665e0b80e), [Commit](https://github.com/open-webui/open-webui/commit/f1bbf3a91e4713039364b790e886e59b401572d0), [Commit](https://github.com/open-webui/open-webui/commit/c55afc42559c32a6f0c8beb0f1bb18e9360ab8af), [Commit](https://github.com/open-webui/open-webui/commit/61f20acf61f4fe30c0e5b0180949f6e1a8cf6524)
+- 🔐 OAuth 2.1 support for MCP server authentication was implemented, with secure automatic dynamic client registration, encrypted session management, and seamless authentication flows. [Commit](https://github.com/open-webui/open-webui/commit/972be4eda5a394c111e849075f94099c9c0dd9aa), [Commit](https://github.com/open-webui/open-webui/commit/77e971dd9fbeee806e2864e686df5ec75e82104b), [Commit](https://github.com/open-webui/open-webui/commit/879abd7feea3692a2f157da4a458d30f27217508), [Commit](https://github.com/open-webui/open-webui/commit/422d38fd114b1ebd8a7dbb114d64e14791e67d7a), [Docs:#709](https://github.com/open-webui/docs/pull/709)
+- 🛠️ External & Built-In Tools can now support rich UI element embedding ([Docs](https://docs.openwebui.com/features/plugin/tools/development)), allowing tools to return HTML content and interactive iframes that display directly within chat conversations with configurable security settings. [Commit](https://github.com/open-webui/open-webui/commit/07c5b25bc8b63173f406feb3ba183d375fedee6a), [Commit](https://github.com/open-webui/open-webui/commit/a5d8882bba7933a2c2c31c0a1405aba507c370bb), [Commit](https://github.com/open-webui/open-webui/commit/7be5b7f50f498de97359003609fc5993a172f084), [Commit](https://github.com/open-webui/open-webui/commit/a89ffccd7e96705a4a40e845289f4fcf9c4ae596)
+- 📝 Note editor now supports drag-and-drop reordering of list items with visual drag handles, making list organization more intuitive and efficient. [Commit](https://github.com/open-webui/open-webui/commit/e4e97e727e9b4971f1c363b1280ca3a101599d88), [Commit](https://github.com/open-webui/open-webui/commit/aeb5288a3c7a6e9e0a47b807cc52f870c1b7dbe6)
+- 🔍 Search modal was enhanced with quick action buttons for starting new conversations and creating notes, with intelligent content pre-population from search queries. [Commit](https://github.com/open-webui/open-webui/commit/aa6f63a335e172fec1dc94b2056541f52c1167a6), [Commit](https://github.com/open-webui/open-webui/commit/612a52d7bb7dbe9fa0bbbc8ac0a552d2b9801146), [Commit](https://github.com/open-webui/open-webui/commit/b03529b006f3148e895b1094584e1ab129ecac5b)
+- 🛠️ Tool user valve configuration interface was added to the integrations menu, displaying clickable gear icon buttons with tooltips for tools that support user-specific settings, making personal tool configurations easily accessible. [Commit](https://github.com/open-webui/open-webui/commit/27d61307cdce97ed11a05ec13fc300249d6022cd)
+- 👥 Channel access control was enhanced to require write permissions for posting, editing, and deleting messages, while read-only users can view content but cannot contribute. [#17543](https://github.com/open-webui/open-webui/pull/17543)
+- 💬 Channel models now support image processing, allowing AI assistants to view and analyze images shared in conversation threads. [Commit](https://github.com/open-webui/open-webui/commit/9f0010e234a6f40782a66021435d3c02b9c23639)
+- 🌐 Attach Webpage button was added to the message input menu, providing a user-friendly modal interface for attaching web content and YouTube videos as an alternative to the existing URL syntax. [#17534](https://github.com/open-webui/open-webui/pull/17534)
+- 🔐 Redis session storage support was added for OAuth redirects, providing better state handling in multi-pod Kubernetes deployments and resolving CSRF mismatch errors. [#17223](https://github.com/open-webui/open-webui/pull/17223), [#15373](https://github.com/open-webui/open-webui/issues/15373)
+- 🔍 Ollama Cloud web search integration was added as a new search engine option, providing access to web search functionality through Ollama's cloud infrastructure. [Commit](https://github.com/open-webui/open-webui/commit/e06489d92baca095b8f376fbef223298c7772579), [Commit](https://github.com/open-webui/open-webui/commit/4b6d34438bcfc45463dc7a9cb984794b32c1f0a1), [Commit](https://github.com/open-webui/open-webui/commit/05c46008da85357dc6890b846789dfaa59f4a520), [Commit](https://github.com/open-webui/open-webui/commit/fe65fe0b97ec5a8fff71592ff04a25c8e123d108), [Docs:#708](https://github.com/open-webui/docs/pull/708)
+- 🔍 Perplexity Web Search API integration was added as a new search engine option, providing access to Perplexity's new web search functionality. [#17756](https://github.com/open-webui/open-webui/issues/17756), [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/7f411dd5cc1c29733216f79e99eeeed0406a2afe)
+- ☁️ OneDrive integration was improved to support separate client IDs for personal and business authentication, enabling both integrations to work simultaneously. [#17619](https://github.com/open-webui/open-webui/pull/17619), [Docs](https://docs.openwebui.com/tutorials/integrations/onedrive-sharepoint), [Docs](https://docs.openwebui.com/getting-started/env-configuration/#onedrive)
+- 📝 Pending user overlay content now supports markdown formatting, enabling rich text display for custom messages similar to banner functionality. [#17681](https://github.com/open-webui/open-webui/pull/17681)
+- 🎨 Image generation model selection was centralized to enable dynamic model override in function calls, allowing pipes and tools to specify different models than the global default while maintaining backward compatibility. [#17689](https://github.com/open-webui/open-webui/pull/17689)
+- 🎨 Interface design was modernized with updated visual styling, improved spacing, and refined component layouts across modals, sidebar, settings, and navigation elements. [Commit](https://github.com/open-webui/open-webui/commit/27a91cc80a24bda0a3a188bc3120a8ab57b00881), [Commit](https://github.com/open-webui/open-webui/commit/4ad743098615f9c58daa9df392f31109aeceeb16), [Commit](https://github.com/open-webui/open-webui/commit/fd7385c3921eb59af76a26f4c475aedb38ce2406)
+- 📊 Notes query performance was optimized through database-level filtering and separated access control logic, reducing memory usage and eliminating N+1 query problems for better scalability. [#17607](https://github.com/open-webui/open-webui/pull/17607) [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/da661756fa7eec754270e6dd8c67cbf74a28a17f)
+- ⚡ Page loading performance was optimized by deferring API requests until components are actually opened, including ChangelogModal, ModelSelector, RecursiveFolder, ArchivedChatsModal, and SearchModal. [#17542](https://github.com/open-webui/open-webui/pull/17542), [#17555](https://github.com/open-webui/open-webui/pull/17555), [#17557](https://github.com/open-webui/open-webui/pull/17557), [#17541](https://github.com/open-webui/open-webui/pull/17541), [#17640](https://github.com/open-webui/open-webui/pull/17640)
+- ⚡ Bundle size was reduced by 1.58MB through optimized highlight.js language support, improving page loading speed and reducing bandwidth usage. [#17645](https://github.com/open-webui/open-webui/pull/17645)
+- ⚡ Editor collaboration functionality was refactored to reduce package size by 390KB and minimize compilation errors, improving build performance and reliability. [#17593](https://github.com/open-webui/open-webui/pull/17593)
+- ♿ User interface accessibility was enhanced through the addition of unique element IDs, improving targeting for testing, styling, and assistive technologies while providing better semantic markup for screen readers and accessibility tools. [#17746](https://github.com/open-webui/open-webui/pull/17746)
+- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security.
+- 🌐 Translations for Portuguese (Brazil), Chinese (Simplified and Traditional), Korean, Irish, Spanish, Finnish, French, Kabyle, Russian, and Catalan were enhanced and improved.
+
+### Fixed
+
+- 🛡️ SVG content security was enhanced by implementing DOMPurify sanitization to prevent XSS attacks through malicious SVG elements, ensuring safe rendering of user-generated SVG content. [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/750a659a9fee7687e667d9d755e17b8a0c77d557)
+- ☁️ OneDrive attachment menu rendering issues were resolved by restructuring the submenu interface from dropdown to tabbed navigation, preventing menu items from being hidden or clipped due to overflow constraints. [#17554](https://github.com/open-webui/open-webui/issues/17554), [Commit](https://github.com/open-webui/open-webui/pull/17747/commits/90e4b49b881b644465831cc3028bb44f0f7a2196)
+- 💬 Attached conversation references now persist throughout the entire chat session, ensuring models can continue querying referenced conversations after multiple conversation turns. [#17750](https://github.com/open-webui/open-webui/issues/17750)
+- 🔍 Search modal text box focus issues after pinning or unpinning chats were resolved, allowing users to properly exit the search interface by clicking outside the text box. [#17743](https://github.com/open-webui/open-webui/issues/17743)
+- 🔍 Search function chat list is now properly updated in real-time when chats are created or deleted, eliminating stale search results and preview loading failures. [#17741](https://github.com/open-webui/open-webui/issues/17741)
+- 💬 Chat jitter and delayed code block expansion in multi-model sessions were resolved by reverting dynamic CodeEditor loading, restoring stable rendering behavior. [#17715](https://github.com/open-webui/open-webui/pull/17715), [#17684](https://github.com/open-webui/open-webui/issues/17684)
+- 📎 File upload handling was improved to properly recognize uploaded files even when no accompanying text message is provided, resolving issues where attachments were ignored in custom prompts. [#17492](https://github.com/open-webui/open-webui/issues/17492)
+- 💬 Chat conversation referencing within projects was restored by including foldered chats in the reference menu, allowing users to properly quote conversations from within their project scope. [#17530](https://github.com/open-webui/open-webui/issues/17530)
+- 🔍 RAG query generation is now skipped when all attached files are set to full context mode, preventing unnecessary retrieval operations and improving system efficiency. [#17744](https://github.com/open-webui/open-webui/pull/17744)
+- 💾 Memory leaks in file handling and HTTP connections are prevented through proper resource cleanup, ensuring stable memory usage during large file downloads and processing operations. [#17608](https://github.com/open-webui/open-webui/pull/17608)
+- 🔐 OAuth access token refresh errors are resolved by properly implementing async/await patterns, preventing "coroutine object has no attribute get" failures during token expiry. [#17585](https://github.com/open-webui/open-webui/issues/17585), [#17678](https://github.com/open-webui/open-webui/issues/17678)
+- ⚙️ Valve behavior was improved to properly handle default values and array types, ensuring only explicitly set values are persisted while maintaining consistent distinction between custom and default valve states. [#17664](https://github.com/open-webui/open-webui/pull/17664)
+- 🔍 Hybrid search functionality was enhanced to handle inconsistent parameter types and prevent failures when collection results are None, empty, or in unexpected formats. [#17617](https://github.com/open-webui/open-webui/pull/17617)
+- 📁 Empty folder deletion is now allowed regardless of chat deletion permission restrictions, resolving cases where users couldn't remove folders after deleting all contained chats. [#17683](https://github.com/open-webui/open-webui/pull/17683)
+- 📝 Rich text editor console errors were resolved by adding proper error handling when the TipTap editor view is not available or not yet mounted. [#17697](https://github.com/open-webui/open-webui/issues/17697)
+- 🗒️ Hidden models are now properly excluded from the notes section dropdown and default model selection, preventing users from accessing models they shouldn't see. [#17722](https://github.com/open-webui/open-webui/pull/17722)
+- 🖼️ AI-generated image download filenames now use a clean, translatable "Generated Image" format instead of potentially problematic response text, improving file management and compatibility. [#17721](https://github.com/open-webui/open-webui/pull/17721)
+- 🎨 Toggle switch display issues in the Integrations interface are fixed, preventing background highlighting and obscuring on hover. [#17564](https://github.com/open-webui/open-webui/issues/17564)
+
+### Changed
+
+- 👥 Channel permissions now require write access for posting, editing, and deleting messages; existing user groups default to read-only access and require manual admin migration to write permissions for full participation.
+- ☁️ OneDrive environment variable configuration was updated to use separate ONEDRIVE_CLIENT_ID_PERSONAL and ONEDRIVE_CLIENT_ID_BUSINESS variables for better client ID separation, while maintaining backward compatibility with the legacy ONEDRIVE_CLIENT_ID variable. [Docs](https://docs.openwebui.com/tutorials/integrations/onedrive-sharepoint), [Docs](https://docs.openwebui.com/getting-started/env-configuration/#onedrive)
+
+## [0.6.30] - 2025-09-17
+
+### Added
+
+- 🔑 Microsoft Entra ID authentication type support was added for Azure OpenAI connections, enabling enhanced security and streamlined authentication workflows.
+
+### Fixed
+
+- ☁️ OneDrive integration was fixed after recent breakage, restoring reliable account connectivity and file access.
+
+## [0.6.29] - 2025-09-17
+
+### Added
+
+- 🎨 The chat input menu has been completely overhauled with a revolutionary new design, consolidating attachments under a unified '+' button, organizing integrations into a streamlined options menu, and introducing powerful, interactive selectors for attaching chats, notes, and knowledge base items. [Commit](https://github.com/open-webui/open-webui/commit/a68342d5a887e36695e21f8c2aec593b159654ff), [Commit](https://github.com/open-webui/open-webui/commit/96b8aaf83ff341fef432649366bc5155bac6cf20), [Commit](https://github.com/open-webui/open-webui/commit/4977e6d50f7b931372c96dd5979ca635d58aeb78), [Commit](https://github.com/open-webui/open-webui/commit/d973db829f7ec98b8f8fe7d3b2822d588e79f94e), [Commit](https://github.com/open-webui/open-webui/commit/d4c628de09654df76653ad9bce9cb3263e2f27c8), [Commit](https://github.com/open-webui/open-webui/commit/cd740f436db4ea308dbede14ef7ff56e8126f51b), [Commit](https://github.com/open-webui/open-webui/commit/5c2db102d06b5c18beb248d795682ff422e9b6d1), [Commit](https://github.com/open-webui/open-webui/commit/031cf38655a1a2973194d2eaa0fbbd17aca8ee92), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/3ed0a6d11fea1a054e0bc8aa8dfbe417c7c53e51), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/eadec9e86e01bc8f9fb90dfe7a7ae4fc3bfa6420), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c03ca7270e64e3a002d321237160c0ddaf2bb129), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b53ddfbd19aa94e9cbf7210acb31c3cfafafa5fe), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c923461882fcde30ae297a95e91176c95b9b72e1)
+- 🤖 AI models can now be mentioned in channels to automatically generate responses, enabling multi-model conversations where mentioned models participate directly in threaded discussions with full context awareness. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/4fe97d8794ee18e087790caab9e5d82886006145)
+- 💬 The Channels feature now utilizes the modern rich text editor, including support for '/', '@', and '#' command suggestions. [Commit](https://github.com/open-webui/open-webui/commit/06c1426e14ac0dfaf723485dbbc9723a4d89aba9), [Commit](https://github.com/open-webui/open-webui/commit/02f7c3258b62970ce79716f75d15467a96565054)
+- 📎 Channel message input now supports direct paste functionality for images and files from the clipboard, streamlining content sharing workflows. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/6549fc839f86c40c26c2ef4dedcaf763a9304418)
+- ⚙️ Models can now be configured with default features (Web Search, Image Generation) and filters that automatically activate when a user selects the model. [Commit](https://github.com/open-webui/open-webui/commit/9a555478273355a5177bfc7f7211c64778e4c8de), [Commit](https://github.com/open-webui/open-webui/commit/384a53b339820068e92f7eaea0d9f3e0536c19c2), [Commit](https://github.com/open-webui/open-webui/commit/d7f43bfc1a30c065def8c50d77c2579c1a3c5c67), [Commit](https://github.com/open-webui/open-webui/commit/6a67a2217cc5946ad771e479e3a37ac213210748)
+- 💬 The ability to reference other chats as context within a conversation was added via the attachment menu. [Commit](https://github.com/open-webui/open-webui/commit/e097bbdf11ae4975c622e086df00d054291cdeb3), [Commit](https://github.com/open-webui/open-webui/commit/f3cd2ffb18e7dedbe88430f9ae7caa6b3cfd79d0), [Commit](https://github.com/open-webui/open-webui/commit/74263c872c5d574a9bb0944d7984f748dc772dba), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/aa8ab349ed2fcb46d1cf994b9c0de2ec2ea35d0d), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/025eef754f0d46789981defd473d001e3b1d0ca2)
+- 🎨 The command suggestion UI for prompts ('/'), models ('@'), and knowledge ('#') was completely overhauled with a more responsive and keyboard-navigable interface. [Commit](https://github.com/open-webui/open-webui/commit/6b69c4da0fb9329ccf7024483960e070cf52ccab), [Commit](https://github.com/open-webui/open-webui/commit/06a6855f844456eceaa4d410c93379460e208202), [Commit](https://github.com/open-webui/open-webui/commit/c55f5578280b936cf581a743df3703e3db1afd54), [Commit](https://github.com/open-webui/open-webui/commit/f68d1ba394d4423d369f827894cde99d760b2402)
+- 👥 User and channel suggestions were added to the mention system, enabling '@' mentions for users and models, and '#' mentions for channels with searchable user lookup and clickable navigation. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/bbd1d2b58c89b35daea234f1fc9208f2af840899), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/aef1e06f0bb72065a25579c982dd49157e320268), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/779db74d7e9b7b00d099b7d65cfbc8a831e74690)
+- 📁 Folder functionality was enhanced with custom background image support, improved drag-and-drop capabilities for moving folders to root level, and better menu interactions. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2a234829f5dfdfde27fdfd30591caa908340efb4), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2b1ee8b0dc5f7c0caaafdd218f20705059fa72e2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b1e5bc8e490745f701909c19b6a444b67c04660e), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/3e584132686372dfeef187596a7c557aa5f48308)
+- ☁️ OneDrive integration configuration now supports selecting between personal and work/school account types via ENABLE_ONEDRIVE_PERSONAL and ENABLE_ONEDRIVE_BUSINESS environment variables. [#17354](https://github.com/open-webui/open-webui/pull/17354), [Commit](https://github.com/open-webui/open-webui/commit/e1e3009a30f9808ce06582d81a60e391f5ca09ec), [Docs:#697](https://github.com/open-webui/docs/pull/697)
+- ⚡ Mermaid.js is now dynamically loaded on demand, significantly reducing first-screen loading time and improving initial page performance. [#17476](https://github.com/open-webui/open-webui/issues/17476), [#17477](https://github.com/open-webui/open-webui/pull/17477)
+- ⚡ Azure MSAL browser library is now dynamically loaded on demand, reducing initial bundle size by 730KB and improving first-screen loading speed. [#17479](https://github.com/open-webui/open-webui/pull/17479)
+- ⚡ CodeEditor component is now dynamically loaded on demand, reducing initial bundle size by 1MB and improving first-screen loading speed. [#17498](https://github.com/open-webui/open-webui/pull/17498)
+- ⚡ Hugging Face Transformers library is now dynamically loaded on demand, reducing initial bundle size by 1.9MB and improving first-screen loading speed. [#17499](https://github.com/open-webui/open-webui/pull/17499)
+- ⚡ jsPDF and html2canvas-pro libraries are now dynamically loaded on demand, reducing initial bundle size by 980KB and improving first-screen loading speed. [#17502](https://github.com/open-webui/open-webui/pull/17502)
+- ⚡ Leaflet mapping library is now dynamically loaded on demand, reducing initial bundle size by 454KB and improving first-screen loading speed. [#17503](https://github.com/open-webui/open-webui/pull/17503)
+- 📊 OpenTelemetry metrics collection was enhanced to properly handle HTTP 500 errors and ensure metrics are recorded even during exceptions. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b14617a653c6bdcfd3102c12f971924fd1faf572)
+- 🔒 OAuth token retrieval logic was refactored, improving the reliability and consistency of authentication handling across the backend. [Commit](https://github.com/open-webui/open-webui/commit/6c0a5fa91cdbf6ffb74667ee61ca96bebfdfbc50)
+- 💻 Code block output processing was improved to handle Python execution results more reliably, along with refined visual styling and button layouts. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/0e5320c39e308ff97f2ca9e289618af12479eb6e)
+- ⚡ Message input processing was optimized to skip unnecessary text variable handling when input is empty, improving performance. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/e1386fe80b77126a12dabc4ad058abe9b024b275)
+- 📄 Individual chat PDF export was added to the sidebar chat menu, allowing users to export single conversations as PDF documents with both stylized and plain text options. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/d041d58bb619689cd04a391b4f8191b23941ca62)
+- 🛠️ Function validation was enhanced with improved valve validation and better error handling during function loading and synchronization. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/e66e0526ed6a116323285f79f44237538b6c75e6), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/8edfd29102e0a61777b23d3575eaa30be37b59a5)
+- 🔔 Notification toast interaction was enhanced with drag detection to prevent accidental clicks and added keyboard support for accessibility. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/621e7679c427b6f0efa85f95235319238bf171ad)
+- 🗓️ Improved date and time formatting dynamically adapts to the selected language, ensuring consistent localization across the UI. [#17409](https://github.com/open-webui/open-webui/pull/17409), [Commit](https://github.com/open-webui/open-webui/commit/2227f24bd6d861b1fad8d2cabacf7d62ce137d0c)
+- 🔒 Feishu SSO integration was added, allowing users to authenticate via Feishu. [#17284](https://github.com/open-webui/open-webui/pull/17284), [Docs:#685](https://github.com/open-webui/docs/pull/685)
+- 🔠 Toggle filters in the chat input options menu are now sorted alphabetically for easier navigation. [Commit](https://github.com/open-webui/open-webui/commit/ca853ca4656180487afcd84230d214f91db52533)
+- 🎨 Long chat titles in the sidebar are now truncated to prevent text overflow and maintain a clean layout. [#17356](https://github.com/open-webui/open-webui/pull/17356)
+- 🎨 Temporary chat interface design was refined with improved layout and visual consistency. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/67549dcadd670285d491bd41daf3d081a70fd094), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2ca34217e68f3b439899c75881dfb050f49c9eb2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/fb02ec52a5df3f58b53db4ab3a995c15f83503cd)
+- 🎨 Download icon consistency was improved across the entire interface by standardizing the icon component used in menus, functions, tools, and export features. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/596be451ece7e11b5cd25465d49670c27a1cb33f)
+- 🎨 Settings interface was enhanced with improved iconography, and the 'Chats' section was reorganized into 'Data Controls' for better clarity. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/8bf0b40fdd978b5af6548a6e1fb3aabd90bcd5cd)
+- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security.
+- 🌐 Translations for Finnish, German, Kabyle, Portuguese (Brazil), Simplified Chinese, Spanish (Spain), and Traditional Chinese (Taiwan) were enhanced and expanded.
+
+### Fixed
+
+- 📚 Knowledge base permission logic was corrected to ensure private collection owners can access their own content when embedding bypass is enabled. [#17432](https://github.com/open-webui/open-webui/issues/17432), [Commit](https://github.com/open-webui/open-webui/commit/a51f0c30ec1472d71487eab3e15d0351a2716b12)
+- ⚙️ Connection URL editing in Admin Settings now properly saves changes instead of reverting to original values, fixing issues with both Ollama and OpenAI-compatible endpoints. [#17435](https://github.com/open-webui/open-webui/issues/17435), [Commit](https://github.com/open-webui/open-webui/commit/e4c864de7eb0d577843a80688677ce3659d1f81f)
+- 📊 Usage information collection from Google models was corrected to handle providers that send usage data alongside content chunks instead of separately. [#17421](https://github.com/open-webui/open-webui/pull/17421), [Commit](https://github.com/open-webui/open-webui/commit/c2f98a4cd29ed738f395fef09c42ab8e73cd46a0)
+- ⚙️ Settings modal scrolling issue was resolved by moving image compression controls to a dedicated modal, preventing the main settings from becoming scrollable out of view. [#17474](https://github.com/open-webui/open-webui/issues/17474), [Commit](https://github.com/open-webui/open-webui/commit/fed5615c19b0045a55b0be426b468a57bfda4b66)
+- 📁 Folder click behavior was improved to prevent accidental actions by implementing proper double-click detection and timing delays for folder expansion and selection. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/19e3214997170eea6ee92452e8c778e04a28e396)
+- 🔐 Access control component reliability was improved with better null checking and error handling for group permissions and private access scenarios. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c8780a7f934c5e49a21b438f2f30232f83cf75d2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/32015c392dbc6b7367a6a91d9e173e675ea3402c)
+- 🔗 The citation modal now correctly displays and links to external web page sources in addition to internal documents. [Commit](https://github.com/open-webui/open-webui/commit/9208a84185a7e59524f00a7576667d493c3ac7d4)
+- 🔗 Web and YouTube attachment handling was fixed, ensuring their content is now reliably processed and included in the chat context for retrieval. [Commit](https://github.com/open-webui/open-webui/commit/210197fd438b52080cda5d6ce3d47b92cdc264c8)
+- 📂 Large file upload failures are resolved by correcting the processing logic for scenarios where document embedding is bypassed. [Commit](https://github.com/open-webui/open-webui/commit/051b6daa8299fd332503bd584563556e2ae6adab)
+- 🌐 Rich text input placeholder text now correctly updates when the interface language is switched, ensuring proper localization. [#17473](https://github.com/open-webui/open-webui/pull/17473), [Commit](https://github.com/open-webui/open-webui/commit/77358031f5077e6efe5cc08d8d4e5831c7cd1cd9)
+- 📊 Llama.cpp server timing metrics are now correctly parsed and displayed by fixing a typo in the response handling. [#17350](https://github.com/open-webui/open-webui/issues/17350), [Commit](https://github.com/open-webui/open-webui/commit/cf72f5503f39834b9da44ebbb426a3674dad0caa)
+- 🛠️ Filter functions with file_handler configuration now properly handle messages without file attachments, preventing runtime errors. [#17423](https://github.com/open-webui/open-webui/pull/17423)
+- 🔔 Channel notification delivery was fixed to properly handle background task execution and user access checking. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/1077b2ac8b96e49c2ad2620e76eb65bbb2a3a1f3)
+
+### Changed
+
+- 📝 Prompt template variables are now optional by default instead of being forced as required, allowing flexible workflows with optional metadata fields. [#17447](https://github.com/open-webui/open-webui/issues/17447), [Commit](https://github.com/open-webui/open-webui/commit/d5824b1b495fcf86e57171769bcec2a0f698b070), [Docs:#696](https://github.com/open-webui/docs/pull/696)
+- 🛠️ Direct external tool servers now require explicit user selection from the input interface instead of being automatically included in conversations, providing better control over tool usage. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/0f04227c34ca32746c43a9323e2df32299fcb6af), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/99bba12de279dd55c55ded35b2e4f819af1c9ab5)
+- 📺 Widescreen mode option was removed from Channels interface, with all channel layouts now using full-width display. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/d46b7b8f1b99a8054b55031fe935c8a16d5ec956)
+- 🎛️ The plain textarea input option was deprecated, and the custom text editor is now the standard for all chat inputs. [Commit](https://github.com/open-webui/open-webui/commit/153afd832ccd12a1e5fd99b085008d080872c161)
+
+## [0.6.28] - 2025-09-10
+
+### Added
+
+- 🔍 The "@" command for model selection now supports real-time search and filtering, improving usability and aligning its behavior with other input commands. [#17307](https://github.com/open-webui/open-webui/issues/17307), [Commit](https://github.com/open-webui/open-webui/commit/f2a09c71499489ee71599af4a179e7518aaf658b)
+- 🛠️ External tool server data handling is now more robust, automatically attempting to parse specifications as JSON before falling back to YAML, regardless of the URL extension. [Commit](https://github.com/open-webui/open-webui/commit/774c0056bde88ed4831422efa81506488e3d6641)
+- 🎯 The "Title" field is now automatically focused when creating a new chat folder, streamlining the folder creation process. [#17315](https://github.com/open-webui/open-webui/issues/17315), [Commit](https://github.com/open-webui/open-webui/commit/c51a651a2d5e2a27546416666812e9b92205562d)
+- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security.
+- 🌐 Brazilian Portuguese and Simplified Chinese translations were expanded and refined.
+
+### Fixed
+
+- 🔊 A regression affecting Text-to-Speech for local providers using the OpenAI engine was fixed by reverting a URL joining change. [#17316](https://github.com/open-webui/open-webui/issues/17316), [Commit](https://github.com/open-webui/open-webui/commit/8339f59cdfc63f2d58c8e26933d1bf1438479d75)
+- 🪧 A regression was fixed where the input modal for prompts with placeholders would not open, causing the raw prompt text to be pasted into the chat input field instead. [#17325](https://github.com/open-webui/open-webui/issues/17325), [Commit](https://github.com/open-webui/open-webui/commit/d5cb65527eaa4831459a4c7dbf187daa9c0525ae)
+- 🔑 An issue was resolved where modified connection keys in the OpenAIConnection component did not take effect. [#17324](https://github.com/open-webui/open-webui/pull/17324)
+
## [0.6.27] - 2025-09-09
### Added
diff --git a/LICENSE_NOTICE b/LICENSE_NOTICE
new file mode 100644
index 0000000000..4e00d46d9a
--- /dev/null
+++ b/LICENSE_NOTICE
@@ -0,0 +1,11 @@
+# Open WebUI Multi-License Notice
+
+This repository contains code governed by multiple licenses based on the date and origin of contribution:
+
+1. All code committed prior to commit a76068d69cd59568b920dfab85dc573dbbb8f131 is licensed under the MIT License (see LICENSE_HISTORY).
+
+2. All code committed from commit a76068d69cd59568b920dfab85dc573dbbb8f131 up to and including commit 60d84a3aae9802339705826e9095e272e3c83623 is licensed under the BSD 3-Clause License (see LICENSE_HISTORY).
+
+3. All code contributed or modified after commit 60d84a3aae9802339705826e9095e272e3c83623 is licensed under the Open WebUI License (see LICENSE).
+
+For details on which commits are covered by which license, refer to LICENSE_HISTORY.
diff --git a/README.md b/README.md
index 9b01496d9f..49c0a8d9d3 100644
--- a/README.md
+++ b/README.md
@@ -248,7 +248,7 @@ Discover upcoming features on our roadmap in the [Open WebUI Documentation](http
## License 📜
-This project is licensed under the [Open WebUI License](LICENSE), a revised BSD-3-Clause license. You receive all the same rights as the classic BSD-3 license: you can use, modify, and distribute the software, including in proprietary and commercial products, with minimal restrictions. The only additional requirement is to preserve the "Open WebUI" branding, as detailed in the LICENSE file. For full terms, see the [LICENSE](LICENSE) document. 📄
+This project contains code under multiple licenses. The current codebase includes components licensed under the Open WebUI License with an additional requirement to preserve the "Open WebUI" branding, as well as prior contributions under their respective original licenses. For a detailed record of license changes and the applicable terms for each section of the code, please refer to [LICENSE_HISTORY](./LICENSE_HISTORY). For complete and updated licensing details, please see the [LICENSE](./LICENSE) and [LICENSE_HISTORY](./LICENSE_HISTORY) files.
## Support 💬
diff --git a/backend/dev.sh b/backend/dev.sh
index 504b8f7554..042fbd9efa 100755
--- a/backend/dev.sh
+++ b/backend/dev.sh
@@ -1,3 +1,3 @@
-export CORS_ALLOW_ORIGIN="http://localhost:5173"
+export CORS_ALLOW_ORIGIN="http://localhost:5173;http://localhost:8080"
PORT="${PORT:-8080}"
uvicorn open_webui.main:app --port $PORT --host 0.0.0.0 --forwarded-allow-ips '*' --reload
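For reference, the updated `CORS_ALLOW_ORIGIN` value uses `;` as the separator between origins. A minimal sketch of how such a value is assumed to be consumed (the actual parsing lives in `backend/open_webui/env.py` and may differ in detail):

```python
# Sketch, assuming CORS_ALLOW_ORIGIN is split on ";" into an origin list
# and handed to FastAPI's CORSMiddleware.
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

cors_allow_origin = os.environ.get(
    "CORS_ALLOW_ORIGIN", "http://localhost:5173;http://localhost:8080"
).split(";")

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=cors_allow_origin,  # ["http://localhost:5173", "http://localhost:8080"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```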
diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py
index 11698d87af..7e5c35a451 100644
--- a/backend/open_webui/config.py
+++ b/backend/open_webui/config.py
@@ -222,10 +222,11 @@ class PersistentConfig(Generic[T]):
class AppConfig:
- _state: dict[str, PersistentConfig]
_redis: Union[redis.Redis, redis.cluster.RedisCluster] = None
_redis_key_prefix: str
+ _state: dict[str, PersistentConfig]
+
def __init__(
self,
redis_url: Optional[str] = None,
@@ -233,9 +234,8 @@ class AppConfig:
redis_cluster: Optional[bool] = False,
redis_key_prefix: str = "open-webui",
):
- super().__setattr__("_state", {})
- super().__setattr__("_redis_key_prefix", redis_key_prefix)
if redis_url:
+ super().__setattr__("_redis_key_prefix", redis_key_prefix)
super().__setattr__(
"_redis",
get_redis_connection(
@@ -246,6 +246,8 @@ class AppConfig:
),
)
+ super().__setattr__("_state", {})
+
def __setattr__(self, key, value):
if isinstance(value, PersistentConfig):
self._state[key] = value
@@ -513,6 +515,30 @@ OAUTH_GROUPS_CLAIM = PersistentConfig(
os.environ.get("OAUTH_GROUPS_CLAIM", os.environ.get("OAUTH_GROUP_CLAIM", "groups")),
)
+FEISHU_CLIENT_ID = PersistentConfig(
+ "FEISHU_CLIENT_ID",
+ "oauth.feishu.client_id",
+ os.environ.get("FEISHU_CLIENT_ID", ""),
+)
+
+FEISHU_CLIENT_SECRET = PersistentConfig(
+ "FEISHU_CLIENT_SECRET",
+ "oauth.feishu.client_secret",
+ os.environ.get("FEISHU_CLIENT_SECRET", ""),
+)
+
+FEISHU_OAUTH_SCOPE = PersistentConfig(
+ "FEISHU_OAUTH_SCOPE",
+ "oauth.feishu.scope",
+ os.environ.get("FEISHU_OAUTH_SCOPE", "contact:user.base:readonly"),
+)
+
+FEISHU_REDIRECT_URI = PersistentConfig(
+ "FEISHU_REDIRECT_URI",
+ "oauth.feishu.redirect_uri",
+ os.environ.get("FEISHU_REDIRECT_URI", ""),
+)
+
ENABLE_OAUTH_ROLE_MANAGEMENT = PersistentConfig(
"ENABLE_OAUTH_ROLE_MANAGEMENT",
"oauth.enable_role_mapping",
@@ -705,6 +731,33 @@ def load_oauth_providers():
"register": oidc_oauth_register,
}
+ if FEISHU_CLIENT_ID.value and FEISHU_CLIENT_SECRET.value:
+
+ def feishu_oauth_register(client: OAuth):
+ client.register(
+ name="feishu",
+ client_id=FEISHU_CLIENT_ID.value,
+ client_secret=FEISHU_CLIENT_SECRET.value,
+ access_token_url="https://open.feishu.cn/open-apis/authen/v2/oauth/token",
+ authorize_url="https://accounts.feishu.cn/open-apis/authen/v1/authorize",
+ api_base_url="https://open.feishu.cn/open-apis",
+ userinfo_endpoint="https://open.feishu.cn/open-apis/authen/v1/user_info",
+ client_kwargs={
+ "scope": FEISHU_OAUTH_SCOPE.value,
+ **(
+ {"timeout": int(OAUTH_TIMEOUT.value)}
+ if OAUTH_TIMEOUT.value
+ else {}
+ ),
+ },
+ redirect_uri=FEISHU_REDIRECT_URI.value,
+ )
+
+ OAUTH_PROVIDERS["feishu"] = {
+ "register": feishu_oauth_register,
+ "sub_claim": "user_id",
+ }
+
configured_providers = []
if GOOGLE_CLIENT_ID.value:
configured_providers.append("Google")
@@ -712,6 +765,8 @@ def load_oauth_providers():
configured_providers.append("Microsoft")
if GITHUB_CLIENT_ID.value:
configured_providers.append("GitHub")
+ if FEISHU_CLIENT_ID.value:
+ configured_providers.append("Feishu")
if configured_providers and not OPENID_PROVIDER_URL.value:
provider_list = ", ".join(configured_providers)
@@ -2116,10 +2171,20 @@ ENABLE_ONEDRIVE_INTEGRATION = PersistentConfig(
os.getenv("ENABLE_ONEDRIVE_INTEGRATION", "False").lower() == "true",
)
-ONEDRIVE_CLIENT_ID = PersistentConfig(
- "ONEDRIVE_CLIENT_ID",
- "onedrive.client_id",
- os.environ.get("ONEDRIVE_CLIENT_ID", ""),
+
+ENABLE_ONEDRIVE_PERSONAL = (
+ os.environ.get("ENABLE_ONEDRIVE_PERSONAL", "True").lower() == "true"
+)
+ENABLE_ONEDRIVE_BUSINESS = (
+ os.environ.get("ENABLE_ONEDRIVE_BUSINESS", "True").lower() == "true"
+)
+
+ONEDRIVE_CLIENT_ID = os.environ.get("ONEDRIVE_CLIENT_ID", "")
+ONEDRIVE_CLIENT_ID_PERSONAL = os.environ.get(
+ "ONEDRIVE_CLIENT_ID_PERSONAL", ONEDRIVE_CLIENT_ID
+)
+ONEDRIVE_CLIENT_ID_BUSINESS = os.environ.get(
+ "ONEDRIVE_CLIENT_ID_BUSINESS", ONEDRIVE_CLIENT_ID
)
ONEDRIVE_SHAREPOINT_URL = PersistentConfig(
@@ -2702,6 +2767,12 @@ WEB_SEARCH_TRUST_ENV = PersistentConfig(
)
+OLLAMA_CLOUD_WEB_SEARCH_API_KEY = PersistentConfig(
+ "OLLAMA_CLOUD_WEB_SEARCH_API_KEY",
+ "rag.web.search.ollama_cloud_api_key",
+ os.getenv("OLLAMA_CLOUD_API_KEY", ""),
+)
+
SEARXNG_QUERY_URL = PersistentConfig(
"SEARXNG_QUERY_URL",
"rag.web.search.searxng_query_url",
diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py
index b4fdc97d82..e02424f969 100644
--- a/backend/open_webui/env.py
+++ b/backend/open_webui/env.py
@@ -474,6 +474,10 @@ ENABLE_OAUTH_ID_TOKEN_COOKIE = (
os.environ.get("ENABLE_OAUTH_ID_TOKEN_COOKIE", "True").lower() == "true"
)
+OAUTH_CLIENT_INFO_ENCRYPTION_KEY = os.environ.get(
+ "OAUTH_CLIENT_INFO_ENCRYPTION_KEY", WEBUI_SECRET_KEY
+)
+
OAUTH_SESSION_TOKEN_ENCRYPTION_KEY = os.environ.get(
"OAUTH_SESSION_TOKEN_ENCRYPTION_KEY", WEBUI_SECRET_KEY
)
@@ -547,16 +551,16 @@ else:
CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = os.environ.get(
- "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", "10"
+ "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", "30"
)
if CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES == "":
- CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 30
else:
try:
CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = int(CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES)
except Exception:
- CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 30
####################################
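The retry-limit change above keeps the existing read-then-coerce pattern while bumping the default from 10 to 30. A compact equivalent of that pattern (a sketch, not code from the repository):

```python
import os

def int_env(name: str, default: int) -> int:
    """Read an integer environment variable, falling back to `default`
    when it is unset, empty, or not a valid integer."""
    raw = os.environ.get(name, str(default))
    if raw == "":
        return default
    try:
        return int(raw)
    except Exception:
        return default

# Behaviourally equivalent to the updated block: the default is now 30.
CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = int_env("CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", 30)
```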
diff --git a/backend/open_webui/functions.py b/backend/open_webui/functions.py
index 4122cbbe0d..d102263cb3 100644
--- a/backend/open_webui/functions.py
+++ b/backend/open_webui/functions.py
@@ -19,6 +19,7 @@ from fastapi import (
from starlette.responses import Response, StreamingResponse
+from open_webui.constants import ERROR_MESSAGES
from open_webui.socket.main import (
get_event_call,
get_event_emitter,
@@ -60,8 +61,20 @@ def get_function_module_by_id(request: Request, pipe_id: str):
function_module, _, _ = get_function_module_from_cache(request, pipe_id)
if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
+ Valves = function_module.Valves
valves = Functions.get_function_valves_by_id(pipe_id)
- function_module.valves = function_module.Valves(**(valves if valves else {}))
+
+ if valves:
+ try:
+ function_module.valves = Valves(
+ **{k: v for k, v in valves.items() if v is not None}
+ )
+ except Exception as e:
+ log.exception(f"Error loading valves for function {pipe_id}: {e}")
+ raise e
+ else:
+ function_module.valves = Valves()
+
return function_module
@@ -70,65 +83,69 @@ async def get_function_models(request):
pipe_models = []
for pipe in pipes:
- function_module = get_function_module_by_id(request, pipe.id)
+ try:
+ function_module = get_function_module_by_id(request, pipe.id)
- # Check if function is a manifold
- if hasattr(function_module, "pipes"):
- sub_pipes = []
-
- # Handle pipes being a list, sync function, or async function
- try:
- if callable(function_module.pipes):
- if asyncio.iscoroutinefunction(function_module.pipes):
- sub_pipes = await function_module.pipes()
- else:
- sub_pipes = function_module.pipes()
- else:
- sub_pipes = function_module.pipes
- except Exception as e:
- log.exception(e)
+ # Check if function is a manifold
+ if hasattr(function_module, "pipes"):
sub_pipes = []
- log.debug(
- f"get_function_models: function '{pipe.id}' is a manifold of {sub_pipes}"
- )
+ # Handle pipes being a list, sync function, or async function
+ try:
+ if callable(function_module.pipes):
+ if asyncio.iscoroutinefunction(function_module.pipes):
+ sub_pipes = await function_module.pipes()
+ else:
+ sub_pipes = function_module.pipes()
+ else:
+ sub_pipes = function_module.pipes
+ except Exception as e:
+ log.exception(e)
+ sub_pipes = []
- for p in sub_pipes:
- sub_pipe_id = f'{pipe.id}.{p["id"]}'
- sub_pipe_name = p["name"]
+ log.debug(
+ f"get_function_models: function '{pipe.id}' is a manifold of {sub_pipes}"
+ )
- if hasattr(function_module, "name"):
- sub_pipe_name = f"{function_module.name}{sub_pipe_name}"
+ for p in sub_pipes:
+ sub_pipe_id = f'{pipe.id}.{p["id"]}'
+ sub_pipe_name = p["name"]
- pipe_flag = {"type": pipe.type}
+ if hasattr(function_module, "name"):
+ sub_pipe_name = f"{function_module.name}{sub_pipe_name}"
+
+ pipe_flag = {"type": pipe.type}
+
+ pipe_models.append(
+ {
+ "id": sub_pipe_id,
+ "name": sub_pipe_name,
+ "object": "model",
+ "created": pipe.created_at,
+ "owned_by": "openai",
+ "pipe": pipe_flag,
+ }
+ )
+ else:
+ pipe_flag = {"type": "pipe"}
+
+ log.debug(
+ f"get_function_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}"
+ )
pipe_models.append(
{
- "id": sub_pipe_id,
- "name": sub_pipe_name,
+ "id": pipe.id,
+ "name": pipe.name,
"object": "model",
"created": pipe.created_at,
"owned_by": "openai",
"pipe": pipe_flag,
}
)
- else:
- pipe_flag = {"type": "pipe"}
-
- log.debug(
- f"get_function_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}"
- )
-
- pipe_models.append(
- {
- "id": pipe.id,
- "name": pipe.name,
- "object": "model",
- "created": pipe.created_at,
- "owned_by": "openai",
- "pipe": pipe_flag,
- }
- )
+ except Exception as e:
+ log.exception(e)
+ continue
return pipe_models
@@ -221,10 +238,11 @@ async def generate_function_chat_completion(
oauth_token = None
try:
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
- user.id,
- request.cookies.get("oauth_session_id", None),
- )
+ if request.cookies.get("oauth_session_id", None):
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
except Exception as e:
log.error(f"Error getting OAuth token: {e}")
diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py
index 31a6f0c054..aaf7153c50 100644
--- a/backend/open_webui/main.py
+++ b/backend/open_webui/main.py
@@ -50,6 +50,11 @@ from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import Response, StreamingResponse
from starlette.datastructures import Headers
+from starsessions import (
+ SessionMiddleware as StarSessionsMiddleware,
+ SessionAutoloadMiddleware,
+)
+from starsessions.stores.redis import RedisStore
from open_webui.utils import logger
from open_webui.utils.audit import AuditLevel, AuditLoggingMiddleware
@@ -111,9 +116,6 @@ from open_webui.config import (
OLLAMA_API_CONFIGS,
# OpenAI
ENABLE_OPENAI_API,
- ONEDRIVE_CLIENT_ID,
- ONEDRIVE_SHAREPOINT_URL,
- ONEDRIVE_SHAREPOINT_TENANT_ID,
OPENAI_API_BASE_URLS,
OPENAI_API_KEYS,
OPENAI_API_CONFIGS,
@@ -273,6 +275,7 @@ from open_webui.config import (
WEB_SEARCH_CONCURRENT_REQUESTS,
WEB_SEARCH_TRUST_ENV,
WEB_SEARCH_DOMAIN_FILTER_LIST,
+ OLLAMA_CLOUD_WEB_SEARCH_API_KEY,
JINA_API_KEY,
SEARCHAPI_API_KEY,
SEARCHAPI_ENGINE,
@@ -304,14 +307,17 @@ from open_webui.config import (
GOOGLE_PSE_ENGINE_ID,
GOOGLE_DRIVE_CLIENT_ID,
GOOGLE_DRIVE_API_KEY,
- ONEDRIVE_CLIENT_ID,
+ ENABLE_ONEDRIVE_INTEGRATION,
+ ONEDRIVE_CLIENT_ID_PERSONAL,
+ ONEDRIVE_CLIENT_ID_BUSINESS,
ONEDRIVE_SHAREPOINT_URL,
ONEDRIVE_SHAREPOINT_TENANT_ID,
+ ENABLE_ONEDRIVE_PERSONAL,
+ ENABLE_ONEDRIVE_BUSINESS,
ENABLE_RAG_HYBRID_SEARCH,
ENABLE_RAG_LOCAL_WEB_FETCH,
ENABLE_WEB_LOADER_SSL_VERIFICATION,
ENABLE_GOOGLE_DRIVE_INTEGRATION,
- ENABLE_ONEDRIVE_INTEGRATION,
UPLOAD_DIR,
EXTERNAL_WEB_SEARCH_URL,
EXTERNAL_WEB_SEARCH_API_KEY,
@@ -449,6 +455,7 @@ from open_webui.utils.models import (
get_all_models,
get_all_base_models,
check_model_access,
+ get_filtered_models,
)
from open_webui.utils.chat import (
generate_chat_completion as chat_completion_handler,
@@ -467,7 +474,12 @@ from open_webui.utils.auth import (
get_verified_user,
)
from open_webui.utils.plugin import install_tool_and_function_dependencies
-from open_webui.utils.oauth import OAuthManager
+from open_webui.utils.oauth import (
+ OAuthManager,
+ OAuthClientManager,
+ decrypt_data,
+ OAuthClientInformationFull,
+)
from open_webui.utils.security_headers import SecurityHeadersMiddleware
from open_webui.utils.redis import get_redis_connection
@@ -597,9 +609,14 @@ app = FastAPI(
lifespan=lifespan,
)
+# For Open WebUI OIDC/OAuth2
oauth_manager = OAuthManager(app)
app.state.oauth_manager = oauth_manager
+# For Integrations
+oauth_client_manager = OAuthClientManager(app)
+app.state.oauth_client_manager = oauth_client_manager
+
app.state.instance_id = None
app.state.config = AppConfig(
redis_url=REDIS_URL,
@@ -883,6 +900,8 @@ app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER = BYPASS_WEB_SEARCH_WEB_LOADER
app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ENABLE_GOOGLE_DRIVE_INTEGRATION
app.state.config.ENABLE_ONEDRIVE_INTEGRATION = ENABLE_ONEDRIVE_INTEGRATION
+
+app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY = OLLAMA_CLOUD_WEB_SEARCH_API_KEY
app.state.config.SEARXNG_QUERY_URL = SEARXNG_QUERY_URL
app.state.config.YACY_QUERY_URL = YACY_QUERY_URL
app.state.config.YACY_USERNAME = YACY_USERNAME
@@ -1293,33 +1312,6 @@ if audit_level != AuditLevel.NONE:
async def get_models(
request: Request, refresh: bool = False, user=Depends(get_verified_user)
):
- def get_filtered_models(models, user):
- filtered_models = []
- for model in models:
- if model.get("arena"):
- if has_access(
- user.id,
- type="read",
- access_control=model.get("info", {})
- .get("meta", {})
- .get("access_control", {}),
- ):
- filtered_models.append(model)
- continue
-
- model_info = Models.get_model_by_id(model["id"])
- if model_info:
- if (
- (user.role == "admin" and BYPASS_ADMIN_ACCESS_CONTROL)
- or user.id == model_info.user_id
- or has_access(
- user.id, type="read", access_control=model_info.access_control
- )
- ):
- filtered_models.append(model)
-
- return filtered_models
-
all_models = await get_all_models(request, refresh=refresh, user=user)
models = []
@@ -1355,12 +1347,7 @@ async def get_models(
)
)
- # Filter out models that the user does not have access to
- if (
- user.role == "user"
- or (user.role == "admin" and not BYPASS_ADMIN_ACCESS_CONTROL)
- ) and not BYPASS_MODEL_ACCESS_CONTROL:
- models = get_filtered_models(models, user)
+ models = get_filtered_models(models, user)
log.debug(
f"/api/models returned filtered models accessible to the user: {json.dumps([model.get('id') for model in models])}"
@@ -1420,14 +1407,6 @@ async def chat_completion(
model_item = form_data.pop("model_item", {})
tasks = form_data.pop("background_tasks", None)
- oauth_token = None
- try:
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
- user.id, request.cookies.get("oauth_session_id", None)
- )
- except Exception as e:
- log.error(f"Error getting OAuth token: {e}")
-
metadata = {}
try:
if not model_item.get("direct", False):
@@ -1572,6 +1551,14 @@ async def chat_completion(
except:
pass
+ finally:
+ try:
+ if mcp_clients := metadata.get("mcp_clients"):
+ for client in mcp_clients:
+ await client.disconnect()
+ except Exception as e:
+ log.debug(f"Error cleaning up: {e}")
+ pass
if (
metadata.get("session_id")
@@ -1740,6 +1727,14 @@ async def get_app_config(request: Request):
"enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
"enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
"enable_onedrive_integration": app.state.config.ENABLE_ONEDRIVE_INTEGRATION,
+ **(
+ {
+ "enable_onedrive_personal": ENABLE_ONEDRIVE_PERSONAL,
+ "enable_onedrive_business": ENABLE_ONEDRIVE_BUSINESS,
+ }
+ if app.state.config.ENABLE_ONEDRIVE_INTEGRATION
+ else {}
+ ),
}
if user is not None
else {}
@@ -1777,7 +1772,8 @@ async def get_app_config(request: Request):
"api_key": GOOGLE_DRIVE_API_KEY.value,
},
"onedrive": {
- "client_id": ONEDRIVE_CLIENT_ID.value,
+ "client_id_personal": ONEDRIVE_CLIENT_ID_PERSONAL,
+ "client_id_business": ONEDRIVE_CLIENT_ID_BUSINESS,
"sharepoint_url": ONEDRIVE_SHAREPOINT_URL.value,
"sharepoint_tenant_id": ONEDRIVE_SHAREPOINT_TENANT_ID.value,
},
@@ -1897,14 +1893,78 @@ async def get_current_usage(user=Depends(get_verified_user)):
# OAuth Login & Callback
############################
+
+# Initialize OAuth client manager with any MCP tool servers using OAuth 2.1
+if len(app.state.config.TOOL_SERVER_CONNECTIONS) > 0:
+ for tool_server_connection in app.state.config.TOOL_SERVER_CONNECTIONS:
+ if tool_server_connection.get("type", "openapi") == "mcp":
+ server_id = tool_server_connection.get("info", {}).get("id")
+ auth_type = tool_server_connection.get("auth_type", "none")
+ if server_id and auth_type == "oauth_2.1":
+ oauth_client_info = tool_server_connection.get("info", {}).get(
+ "oauth_client_info", ""
+ )
+
+ oauth_client_info = decrypt_data(oauth_client_info)
+ app.state.oauth_client_manager.add_client(
+ f"mcp:{server_id}", OAuthClientInformationFull(**oauth_client_info)
+ )
+
+
# SessionMiddleware is used by authlib for oauth
if len(OAUTH_PROVIDERS) > 0:
- app.add_middleware(
- SessionMiddleware,
- secret_key=WEBUI_SECRET_KEY,
- session_cookie="oui-session",
- same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
- https_only=WEBUI_SESSION_COOKIE_SECURE,
+ try:
+ if REDIS_URL:
+ redis_session_store = RedisStore(
+ url=REDIS_URL,
+ prefix=(
+ f"{REDIS_KEY_PREFIX}:session:" if REDIS_KEY_PREFIX else "session:"
+ ),
+ )
+
+ app.add_middleware(SessionAutoloadMiddleware)
+ app.add_middleware(
+ StarSessionsMiddleware,
+ store=redis_session_store,
+ cookie_name="oui-session",
+ cookie_same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
+ cookie_https_only=WEBUI_SESSION_COOKIE_SECURE,
+ )
+ log.info("Using Redis for session")
+ else:
+ raise ValueError("No Redis URL provided")
+ except Exception as e:
+ app.add_middleware(
+ SessionMiddleware,
+ secret_key=WEBUI_SECRET_KEY,
+ session_cookie="oui-session",
+ same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
+ https_only=WEBUI_SESSION_COOKIE_SECURE,
+ )
+
+
+@app.get("/oauth/clients/{client_id}/authorize")
+async def oauth_client_authorize(
+ client_id: str,
+ request: Request,
+ response: Response,
+ user=Depends(get_verified_user),
+):
+ return await oauth_client_manager.handle_authorize(request, client_id=client_id)
+
+
+@app.get("/oauth/clients/{client_id}/callback")
+async def oauth_client_callback(
+ client_id: str,
+ request: Request,
+ response: Response,
+ user=Depends(get_verified_user),
+):
+ return await oauth_client_manager.handle_callback(
+ request,
+ client_id=client_id,
+ user_id=user.id if user else None,
+ response=response,
)
@@ -1919,8 +1979,9 @@ async def oauth_login(provider: str, request: Request):
# - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
# - Email addresses are considered unique, so we fail registration if the email address is already taken
-@app.get("/oauth/{provider}/callback")
-async def oauth_callback(provider: str, request: Request, response: Response):
+@app.get("/oauth/{provider}/login/callback")
+@app.get("/oauth/{provider}/callback") # Legacy endpoint
+async def oauth_login_callback(provider: str, request: Request, response: Response):
return await oauth_manager.handle_callback(request, provider, response)
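For orientation, here is a minimal sketch of how a script might exercise the per-client OAuth routes added above. Only the route shapes and the `mcp:<server_id>` client-id convention come from this diff; the base URL, token, and server id are placeholders.

```python
import requests

BASE_URL = "http://localhost:8080"                  # placeholder deployment URL
HEADERS = {"Authorization": "Bearer <user-token>"}  # any verified user

# Kick off the OAuth 2.1 flow for an MCP tool server registered as "mcp:<server_id>".
resp = requests.get(
    f"{BASE_URL}/oauth/clients/mcp:my-server/authorize",
    headers=HEADERS,
    allow_redirects=False,
)
print(resp.status_code, resp.headers.get("Location"))  # expect a redirect to the provider

# Provider logins now land on /oauth/{provider}/login/callback;
# /oauth/{provider}/callback remains as a legacy alias.
```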
diff --git a/backend/open_webui/models/channels.py b/backend/open_webui/models/channels.py
index 92f238c3a0..e75266be78 100644
--- a/backend/open_webui/models/channels.py
+++ b/backend/open_webui/models/channels.py
@@ -57,6 +57,10 @@ class ChannelModel(BaseModel):
####################
+class ChannelResponse(ChannelModel):
+ write_access: bool = False
+
+
class ChannelForm(BaseModel):
name: str
description: Optional[str] = None
diff --git a/backend/open_webui/models/chats.py b/backend/open_webui/models/chats.py
index 56f992806a..97fd9b6256 100644
--- a/backend/open_webui/models/chats.py
+++ b/backend/open_webui/models/chats.py
@@ -236,7 +236,7 @@ class ChatTable:
return chat.chat.get("title", "New Chat")
- def get_messages_by_chat_id(self, id: str) -> Optional[dict]:
+ def get_messages_map_by_chat_id(self, id: str) -> Optional[dict]:
chat = self.get_chat_by_id(id)
if chat is None:
return None
@@ -492,11 +492,16 @@ class ChatTable:
self,
user_id: str,
include_archived: bool = False,
+ include_folders: bool = False,
skip: Optional[int] = None,
limit: Optional[int] = None,
) -> list[ChatTitleIdResponse]:
with get_db() as db:
- query = db.query(Chat).filter_by(user_id=user_id).filter_by(folder_id=None)
+ query = db.query(Chat).filter_by(user_id=user_id)
+
+ if not include_folders:
+ query = query.filter_by(folder_id=None)
+
query = query.filter(or_(Chat.pinned == False, Chat.pinned == None))
if not include_archived:
@@ -943,6 +948,16 @@ class ChatTable:
return count
+ def count_chats_by_folder_id_and_user_id(self, folder_id: str, user_id: str) -> int:
+ with get_db() as db:
+ query = db.query(Chat).filter_by(user_id=user_id)
+
+ query = query.filter_by(folder_id=folder_id)
+ count = query.count()
+
+ log.info(f"Count of chats for folder '{folder_id}': {count}")
+ return count
+
def delete_tag_by_id_and_user_id_and_tag_name(
self, id: str, user_id: str, tag_name: str
) -> bool:
diff --git a/backend/open_webui/models/files.py b/backend/open_webui/models/files.py
index 57978225d4..bf07b5f86f 100644
--- a/backend/open_webui/models/files.py
+++ b/backend/open_webui/models/files.py
@@ -130,6 +130,17 @@ class FilesTable:
except Exception:
return None
+ def get_file_by_id_and_user_id(self, id: str, user_id: str) -> Optional[FileModel]:
+ with get_db() as db:
+ try:
+ file = db.query(File).filter_by(id=id, user_id=user_id).first()
+ if file:
+ return FileModel.model_validate(file)
+ else:
+ return None
+ except Exception:
+ return None
+
def get_file_metadata_by_id(self, id: str) -> Optional[FileMetadataResponse]:
with get_db() as db:
try:
diff --git a/backend/open_webui/models/functions.py b/backend/open_webui/models/functions.py
index 2bb6d60889..e8ce3aa811 100644
--- a/backend/open_webui/models/functions.py
+++ b/backend/open_webui/models/functions.py
@@ -37,6 +37,7 @@ class Function(Base):
class FunctionMeta(BaseModel):
description: Optional[str] = None
manifest: Optional[dict] = {}
+ model_config = ConfigDict(extra="allow")
class FunctionModel(BaseModel):
@@ -260,6 +261,29 @@ class FunctionsTable:
except Exception:
return None
+ def update_function_metadata_by_id(
+ self, id: str, metadata: dict
+ ) -> Optional[FunctionModel]:
+ with get_db() as db:
+ try:
+ function = db.get(Function, id)
+
+ if function:
+ if function.meta:
+ function.meta = {**function.meta, **metadata}
+ else:
+ function.meta = metadata
+
+ function.updated_at = int(time.time())
+ db.commit()
+ db.refresh(function)
+ return self.get_function_by_id(id)
+ else:
+ return None
+ except Exception as e:
+ log.exception(f"Error updating function metadata by id {id}: {e}")
+ return None
+
def get_user_valves_by_id_and_user_id(
self, id: str, user_id: str
) -> Optional[dict]:
diff --git a/backend/open_webui/models/messages.py b/backend/open_webui/models/messages.py
index a27ae52519..ff4553ee9d 100644
--- a/backend/open_webui/models/messages.py
+++ b/backend/open_webui/models/messages.py
@@ -201,8 +201,14 @@ class MessageTable:
with get_db() as db:
message = db.get(Message, id)
message.content = form_data.content
- message.data = form_data.data
- message.meta = form_data.meta
+ message.data = {
+ **(message.data if message.data else {}),
+ **(form_data.data if form_data.data else {}),
+ }
+ message.meta = {
+ **(message.meta if message.meta else {}),
+ **(form_data.meta if form_data.meta else {}),
+ }
message.updated_at = int(time.time_ns())
db.commit()
db.refresh(message)
diff --git a/backend/open_webui/models/notes.py b/backend/open_webui/models/notes.py
index c720ff80a4..f1b11f071e 100644
--- a/backend/open_webui/models/notes.py
+++ b/backend/open_webui/models/notes.py
@@ -2,6 +2,7 @@ import json
import time
import uuid
from typing import Optional
+from functools import lru_cache
from open_webui.internal.db import Base, get_db
from open_webui.models.groups import Groups
@@ -97,22 +98,85 @@ class NoteTable:
db.commit()
return note
- def get_notes(self) -> list[NoteModel]:
+ def get_notes(
+ self, skip: Optional[int] = None, limit: Optional[int] = None
+ ) -> list[NoteModel]:
with get_db() as db:
- notes = db.query(Note).order_by(Note.updated_at.desc()).all()
+ query = db.query(Note).order_by(Note.updated_at.desc())
+ if skip is not None:
+ query = query.offset(skip)
+ if limit is not None:
+ query = query.limit(limit)
+ notes = query.all()
return [NoteModel.model_validate(note) for note in notes]
def get_notes_by_user_id(
- self, user_id: str, permission: str = "write"
+ self,
+ user_id: str,
+ skip: Optional[int] = None,
+ limit: Optional[int] = None,
) -> list[NoteModel]:
- notes = self.get_notes()
- user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
- return [
- note
- for note in notes
- if note.user_id == user_id
- or has_access(user_id, permission, note.access_control, user_group_ids)
- ]
+ with get_db() as db:
+ query = db.query(Note).filter(Note.user_id == user_id)
+ query = query.order_by(Note.updated_at.desc())
+
+ if skip is not None:
+ query = query.offset(skip)
+ if limit is not None:
+ query = query.limit(limit)
+
+ notes = query.all()
+ return [NoteModel.model_validate(note) for note in notes]
+
+ def get_notes_by_permission(
+ self,
+ user_id: str,
+ permission: str = "write",
+ skip: Optional[int] = None,
+ limit: Optional[int] = None,
+ ) -> list[NoteModel]:
+ with get_db() as db:
+ user_groups = Groups.get_groups_by_member_id(user_id)
+ user_group_ids = {group.id for group in user_groups}
+
+ # Order newest-first. We stream to keep memory usage low.
+ query = (
+ db.query(Note)
+ .order_by(Note.updated_at.desc())
+ .execution_options(stream_results=True)
+ .yield_per(256)
+ )
+
+ results: list[NoteModel] = []
+ n_skipped = 0
+
+ for note in query:
+ # Fast-pass #1: owner
+ if note.user_id == user_id:
+ permitted = True
+ # Fast-pass #2: public/open
+ elif note.access_control is None:
+ # Technically this should mean public access for both read and write, but we'll only do read for now
+ # We might want to change this behavior later
+ permitted = permission == "read"
+ else:
+ permitted = has_access(
+ user_id, permission, note.access_control, user_group_ids
+ )
+
+ if not permitted:
+ continue
+
+ # Apply skip AFTER permission filtering so it counts only accessible notes
+ if skip and n_skipped < skip:
+ n_skipped += 1
+ continue
+
+ results.append(NoteModel.model_validate(note))
+ if limit is not None and len(results) >= limit:
+ break
+
+ return results
def get_note_by_id(self, id: str) -> Optional[NoteModel]:
with get_db() as db:
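A short usage sketch of the permission-aware pagination introduced above; the argument names come from the new signatures, while the user id and the page size of 60 (mirroring the notes router change later in this diff) are assumptions.

```python
from open_webui.models.notes import Notes

page, limit = 1, 60            # page size assumed; matches the /notes/list router change below
skip = (page - 1) * limit

# Owned notes plus notes shared via the user's groups, newest first.
# skip/limit are applied after the permission check, so they count only accessible notes.
notes = Notes.get_notes_by_permission("some-user-id", "write", skip=skip, limit=limit)
for note in notes:
    print(note.id, note.title)
```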
diff --git a/backend/open_webui/models/oauth_sessions.py b/backend/open_webui/models/oauth_sessions.py
index 9fd5335ce5..81ce220384 100644
--- a/backend/open_webui/models/oauth_sessions.py
+++ b/backend/open_webui/models/oauth_sessions.py
@@ -176,6 +176,26 @@ class OAuthSessionTable:
log.error(f"Error getting OAuth session by ID: {e}")
return None
+ def get_session_by_provider_and_user_id(
+ self, provider: str, user_id: str
+ ) -> Optional[OAuthSessionModel]:
+ """Get OAuth session by provider and user ID"""
+ try:
+ with get_db() as db:
+ session = (
+ db.query(OAuthSession)
+ .filter_by(provider=provider, user_id=user_id)
+ .first()
+ )
+ if session:
+ session.token = self._decrypt_token(session.token)
+ return OAuthSessionModel.model_validate(session)
+
+ return None
+ except Exception as e:
+ log.error(f"Error getting OAuth session by provider and user ID: {e}")
+ return None
+
def get_sessions_by_user_id(self, user_id: str) -> List[OAuthSessionModel]:
"""Get all OAuth sessions for a user"""
try:
diff --git a/backend/open_webui/models/tools.py b/backend/open_webui/models/tools.py
index 3a47fa008d..48f84b3ac4 100644
--- a/backend/open_webui/models/tools.py
+++ b/backend/open_webui/models/tools.py
@@ -95,6 +95,8 @@ class ToolResponse(BaseModel):
class ToolUserResponse(ToolResponse):
user: Optional[UserResponse] = None
+ model_config = ConfigDict(extra="allow")
+
class ToolForm(BaseModel):
id: str
diff --git a/backend/open_webui/models/users.py b/backend/open_webui/models/users.py
index 620a746eed..05000744dd 100644
--- a/backend/open_webui/models/users.py
+++ b/backend/open_webui/models/users.py
@@ -107,11 +107,21 @@ class UserInfoResponse(BaseModel):
role: str
+class UserIdNameResponse(BaseModel):
+ id: str
+ name: str
+
+
class UserInfoListResponse(BaseModel):
users: list[UserInfoResponse]
total: int
+class UserIdNameListResponse(BaseModel):
+ users: list[UserIdNameResponse]
+ total: int
+
+
class UserResponse(BaseModel):
id: str
name: str
@@ -210,7 +220,7 @@ class UsersTable:
filter: Optional[dict] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
- ) -> UserListResponse:
+ ) -> dict:
with get_db() as db:
query = db.query(User)
diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py
index dead8458cb..65da1592e1 100644
--- a/backend/open_webui/retrieval/utils.py
+++ b/backend/open_webui/retrieval/utils.py
@@ -19,10 +19,13 @@ from open_webui.retrieval.vector.factory import VECTOR_DB_CLIENT
from open_webui.models.users import UserModel
from open_webui.models.files import Files
from open_webui.models.knowledge import Knowledges
+
+from open_webui.models.chats import Chats
from open_webui.models.notes import Notes
from open_webui.retrieval.vector.main import GetResult
from open_webui.utils.access_control import has_access
+from open_webui.utils.misc import get_message_list
from open_webui.env import (
@@ -124,7 +127,13 @@ def query_doc_with_hybrid_search(
hybrid_bm25_weight: float,
) -> dict:
try:
- if not collection_result.documents[0]:
+ if (
+ not collection_result
+ or not hasattr(collection_result, "documents")
+ or not collection_result.documents
+ or len(collection_result.documents) == 0
+ or not collection_result.documents[0]
+ ):
log.warning(f"query_doc_with_hybrid_search:no_docs {collection_name}")
return {"documents": [], "metadatas": [], "distances": []}
@@ -432,13 +441,14 @@ def get_embedding_function(
if isinstance(query, list):
embeddings = []
for i in range(0, len(query), embedding_batch_size):
- embeddings.extend(
- func(
- query[i : i + embedding_batch_size],
- prefix=prefix,
- user=user,
- )
+ batch_embeddings = func(
+ query[i : i + embedding_batch_size],
+ prefix=prefix,
+ user=user,
)
+
+ if isinstance(batch_embeddings, list):
+ embeddings.extend(batch_embeddings)
return embeddings
else:
return func(query, prefix, user)
@@ -490,25 +500,37 @@ def get_sources_from_items(
# Raw Text
         # Used during temporary chat file uploads or web page & youtube attachments
- if item.get("collection_name"):
- # If item has a collection name, use it
- collection_names.append(item.get("collection_name"))
- elif item.get("file"):
- # if item has file data, use it
- query_result = {
- "documents": [
- [item.get("file", {}).get("data", {}).get("content")]
- ],
- "metadatas": [[item.get("file", {}).get("meta", {})]],
- }
- else:
- # Fallback to item content
- query_result = {
- "documents": [[item.get("content")]],
- "metadatas": [
- [{"file_id": item.get("id"), "name": item.get("name")}]
- ],
- }
+ if item.get("context") == "full":
+ if item.get("file"):
+ # if item has file data, use it
+ query_result = {
+ "documents": [
+ [item.get("file", {}).get("data", {}).get("content")]
+ ],
+ "metadatas": [[item.get("file", {}).get("meta", {})]],
+ }
+
+ if query_result is None:
+ # Fallback
+ if item.get("collection_name"):
+ # If item has a collection name, use it
+ collection_names.append(item.get("collection_name"))
+ elif item.get("file"):
+ # If item has file data, use it
+ query_result = {
+ "documents": [
+ [item.get("file", {}).get("data", {}).get("content")]
+ ],
+ "metadatas": [[item.get("file", {}).get("meta", {})]],
+ }
+ else:
+ # Fallback to item content
+ query_result = {
+ "documents": [[item.get("content")]],
+ "metadatas": [
+ [{"file_id": item.get("id"), "name": item.get("name")}]
+ ],
+ }
elif item.get("type") == "note":
# Note Attached
@@ -525,6 +547,30 @@ def get_sources_from_items(
"metadatas": [[{"file_id": note.id, "name": note.title}]],
}
+ elif item.get("type") == "chat":
+ # Chat Attached
+ chat = Chats.get_chat_by_id(item.get("id"))
+
+ if chat and (user.role == "admin" or chat.user_id == user.id):
+ messages_map = chat.chat.get("history", {}).get("messages", {})
+ message_id = chat.chat.get("history", {}).get("currentId")
+
+ if messages_map and message_id:
+ # Reconstruct the message list in order
+ message_list = get_message_list(messages_map, message_id)
+ message_history = "\n".join(
+ [
+ f"#### {m.get('role', 'user').capitalize()}\n{m.get('content')}\n"
+ for m in message_list
+ ]
+ )
+
+ # User has access to the chat
+ query_result = {
+ "documents": [[message_history]],
+ "metadatas": [[{"file_id": chat.id, "name": chat.title}]],
+ }
+
elif item.get("type") == "file":
if (
item.get("context") == "full"
@@ -581,6 +627,7 @@ def get_sources_from_items(
if knowledge_base and (
user.role == "admin"
+ or knowledge_base.user_id == user.id
or has_access(user.id, "read", knowledge_base.access_control)
):
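To make the new chat-attachment branch above easier to follow, here is a hedged sketch of the history structure it walks with get_message_list; the map/current-id keys, role, and content fields are the ones used above, while the parentId linkage is an assumption about the stored chat format.

```python
from open_webui.utils.misc import get_message_list

# Assumed minimal shape of chat.chat["history"] as consumed above.
history = {
    "currentId": "m2",
    "messages": {
        "m1": {"id": "m1", "parentId": None, "role": "user", "content": "Hi"},
        "m2": {"id": "m2", "parentId": "m1", "role": "assistant", "content": "Hello!"},
    },
}

# Reconstruct the linear thread ending at currentId, then render it like the code above.
message_list = get_message_list(history["messages"], history["currentId"])
transcript = "\n".join(
    f"#### {m.get('role', 'user').capitalize()}\n{m.get('content')}\n" for m in message_list
)
print(transcript)
```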
diff --git a/backend/open_webui/retrieval/web/ollama.py b/backend/open_webui/retrieval/web/ollama.py
new file mode 100644
index 0000000000..a199a14389
--- /dev/null
+++ b/backend/open_webui/retrieval/web/ollama.py
@@ -0,0 +1,51 @@
+import logging
+from dataclasses import dataclass
+from typing import Optional
+
+import requests
+from open_webui.env import SRC_LOG_LEVELS
+from open_webui.retrieval.web.main import SearchResult
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+
+def search_ollama_cloud(
+ url: str,
+ api_key: str,
+ query: str,
+ count: int,
+ filter_list: Optional[list[str]] = None,
+) -> list[SearchResult]:
+ """Search using Ollama Search API and return the results as a list of SearchResult objects.
+
+ Args:
+ api_key (str): An Ollama Search API key
+ query (str): The query to search for
+ count (int): Number of results to return
+ filter_list (Optional[list[str]]): List of domains to filter results by
+ """
+ log.info(f"Searching with Ollama for query: {query}")
+
+ headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+ payload = {"query": query, "max_results": count}
+
+ try:
+ response = requests.post(f"{url}/api/web_search", headers=headers, json=payload)
+ response.raise_for_status()
+ data = response.json()
+
+ results = data.get("results", [])
+ log.info(f"Found {len(results)} results")
+
+ return [
+ SearchResult(
+ link=result.get("url", ""),
+ title=result.get("title", ""),
+ snippet=result.get("content", ""),
+ )
+ for result in results
+ ]
+ except Exception as e:
+ log.error(f"Error searching Ollama: {e}")
+ return []
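A quick usage sketch for the new Ollama cloud search helper; the call signature and SearchResult fields come from the file above, while the endpoint URL and API key are placeholders.

```python
from open_webui.retrieval.web.ollama import search_ollama_cloud

results = search_ollama_cloud(
    url="https://ollama.com",                     # placeholder; the helper posts to {url}/api/web_search
    api_key="<OLLAMA_CLOUD_WEB_SEARCH_API_KEY>",
    query="open webui streamable http mcp",
    count=3,
)
for result in results:
    print(result.title, result.link)
```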
diff --git a/backend/open_webui/retrieval/web/perplexity_search.py b/backend/open_webui/retrieval/web/perplexity_search.py
new file mode 100644
index 0000000000..e3e0caa2b3
--- /dev/null
+++ b/backend/open_webui/retrieval/web/perplexity_search.py
@@ -0,0 +1,64 @@
+import logging
+from typing import Optional, Literal
+import requests
+
+from open_webui.retrieval.web.main import SearchResult, get_filtered_results
+from open_webui.env import SRC_LOG_LEVELS
+
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+
+def search_perplexity_search(
+ api_key: str,
+ query: str,
+ count: int,
+ filter_list: Optional[list[str]] = None,
+) -> list[SearchResult]:
+ """Search using Perplexity API and return the results as a list of SearchResult objects.
+
+ Args:
+ api_key (str): A Perplexity API key
+ query (str): The query to search for
+ count (int): Maximum number of results to return
+ filter_list (Optional[list[str]]): List of domains to filter results
+
+ """
+
+ # Handle PersistentConfig object
+ if hasattr(api_key, "__str__"):
+ api_key = str(api_key)
+
+ try:
+ url = "https://api.perplexity.ai/search"
+
+ # Create payload for the API call
+ payload = {
+ "query": query,
+ "max_results": count,
+ }
+
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ }
+
+ # Make the API request
+ response = requests.request("POST", url, json=payload, headers=headers)
+ # Parse the JSON response
+ json_response = response.json()
+
+ # Extract citations from the response
+ results = json_response.get("results", [])
+
+ return [
+ SearchResult(
+ link=result["url"], title=result["title"], snippet=result["snippet"]
+ )
+ for result in results
+ ]
+
+ except Exception as e:
+ log.error(f"Error searching with Perplexity Search API: {e}")
+ return []
diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py
index 4d50ee9e7e..100610a83a 100644
--- a/backend/open_webui/routers/audio.py
+++ b/backend/open_webui/routers/audio.py
@@ -337,10 +337,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
timeout=timeout, trust_env=True
) as session:
r = await session.post(
- url=urljoin(
- request.app.state.config.TTS_OPENAI_API_BASE_URL,
- "/audio/speech",
- ),
+ url=f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech",
json=payload,
headers={
"Content-Type": "application/json",
@@ -468,10 +465,8 @@ async def speech(request: Request, user=Depends(get_verified_user)):
timeout=timeout, trust_env=True
) as session:
async with session.post(
- urljoin(
- base_url or f"https://{region}.tts.speech.microsoft.com",
- "/cognitiveservices/v1",
- ),
+ (base_url or f"https://{region}.tts.speech.microsoft.com")
+ + "/cognitiveservices/v1",
headers={
"Ocp-Apim-Subscription-Key": request.app.state.config.TTS_API_KEY,
"Content-Type": "application/ssml+xml",
@@ -555,7 +550,7 @@ def transcription_handler(request, file_path, metadata):
metadata = metadata or {}
languages = [
- metadata.get("language", None) if WHISPER_LANGUAGE == "" else WHISPER_LANGUAGE,
+ metadata.get("language", None) if not WHISPER_LANGUAGE else WHISPER_LANGUAGE,
None, # Always fallback to None in case transcription fails
]
diff --git a/backend/open_webui/routers/channels.py b/backend/open_webui/routers/channels.py
index cf3603c6ff..e7b8366347 100644
--- a/backend/open_webui/routers/channels.py
+++ b/backend/open_webui/routers/channels.py
@@ -10,7 +10,13 @@ from pydantic import BaseModel
from open_webui.socket.main import sio, get_user_ids_from_room
from open_webui.models.users import Users, UserNameResponse
-from open_webui.models.channels import Channels, ChannelModel, ChannelForm
+from open_webui.models.groups import Groups
+from open_webui.models.channels import (
+ Channels,
+ ChannelModel,
+ ChannelForm,
+ ChannelResponse,
+)
from open_webui.models.messages import (
Messages,
MessageModel,
@@ -24,9 +30,17 @@ from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
+from open_webui.utils.models import (
+ get_all_models,
+ get_filtered_models,
+)
+from open_webui.utils.chat import generate_chat_completion
+
+
from open_webui.utils.auth import get_admin_user, get_verified_user
from open_webui.utils.access_control import has_access, get_users_with_access
from open_webui.utils.webhook import post_webhook
+from open_webui.utils.channels import extract_mentions, replace_mentions
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -72,7 +86,7 @@ async def create_new_channel(form_data: ChannelForm, user=Depends(get_admin_user
############################
-@router.get("/{id}", response_model=Optional[ChannelModel])
+@router.get("/{id}", response_model=Optional[ChannelResponse])
async def get_channel_by_id(id: str, user=Depends(get_verified_user)):
channel = Channels.get_channel_by_id(id)
if not channel:
@@ -87,7 +101,16 @@ async def get_channel_by_id(id: str, user=Depends(get_verified_user)):
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
)
- return ChannelModel(**channel.model_dump())
+ write_access = has_access(
+ user.id, type="write", access_control=channel.access_control, strict=False
+ )
+
+ return ChannelResponse(
+ **{
+ **channel.model_dump(),
+ "write_access": write_access or user.role == "admin",
+ }
+ )
############################
@@ -200,14 +223,11 @@ async def send_notification(name, webui_url, channel, message, active_user_ids):
users = get_users_with_access("read", channel.access_control)
for user in users:
- if user.id in active_user_ids:
- continue
- else:
+ if user.id not in active_user_ids:
if user.settings:
webhook_url = user.settings.ui.get("notifications", {}).get(
"webhook_url", None
)
-
if webhook_url:
await post_webhook(
name,
@@ -221,14 +241,155 @@ async def send_notification(name, webui_url, channel, message, active_user_ids):
},
)
+ return True
-@router.post("/{id}/messages/post", response_model=Optional[MessageModel])
-async def post_new_message(
- request: Request,
- id: str,
- form_data: MessageForm,
- background_tasks: BackgroundTasks,
- user=Depends(get_verified_user),
+
+async def model_response_handler(request, channel, message, user):
+ MODELS = {
+ model["id"]: model
+ for model in get_filtered_models(await get_all_models(request, user=user), user)
+ }
+
+ mentions = extract_mentions(message.content)
+ message_content = replace_mentions(message.content)
+
+ # check if any of the mentions are models
+ model_mentions = [mention for mention in mentions if mention["id_type"] == "M"]
+ if not model_mentions:
+ return False
+
+ for mention in model_mentions:
+ model_id = mention["id"]
+ model = MODELS.get(model_id, None)
+
+ if model:
+ try:
+ # reverse to get in chronological order
+ thread_messages = Messages.get_messages_by_parent_id(
+ channel.id,
+ message.parent_id if message.parent_id else message.id,
+ )[::-1]
+
+ response_message, channel = await new_message_handler(
+ request,
+ channel.id,
+ MessageForm(
+ **{
+ "parent_id": (
+ message.parent_id if message.parent_id else message.id
+ ),
+ "content": f"",
+ "data": {},
+ "meta": {
+ "model_id": model_id,
+ "model_name": model.get("name", model_id),
+ },
+ }
+ ),
+ user,
+ )
+
+ thread_history = []
+ images = []
+ message_users = {}
+
+ for thread_message in thread_messages:
+ message_user = None
+ if thread_message.user_id not in message_users:
+ message_user = Users.get_user_by_id(thread_message.user_id)
+ message_users[thread_message.user_id] = message_user
+ else:
+ message_user = message_users[thread_message.user_id]
+
+ if thread_message.meta and thread_message.meta.get(
+ "model_id", None
+ ):
+ # If the message was sent by a model, use the model name
+ message_model_id = thread_message.meta.get("model_id", None)
+ message_model = MODELS.get(message_model_id, None)
+ username = (
+ message_model.get("name", message_model_id)
+ if message_model
+ else message_model_id
+ )
+ else:
+ username = message_user.name if message_user else "Unknown"
+
+ thread_history.append(
+ f"{username}: {replace_mentions(thread_message.content)}"
+ )
+
+ thread_message_files = thread_message.data.get("files", [])
+ for file in thread_message_files:
+ if file.get("type", "") == "image":
+ images.append(file.get("url", ""))
+
+ system_message = {
+ "role": "system",
+ "content": f"You are {model.get('name', model_id)}, an AI assistant participating in a threaded conversation. Be helpful, concise, and conversational."
+ + (
+ f"Here's the thread history:\n\n{''.join([f'{msg}' for msg in thread_history])}\n\nContinue the conversation naturally, addressing the most recent message while being aware of the full context."
+ if thread_history
+ else ""
+ ),
+ }
+
+ content = f"{user.name if user else 'User'}: {message_content}"
+ if images:
+ content = [
+ {
+ "type": "text",
+ "text": content,
+ },
+ *[
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": image,
+ },
+ }
+ for image in images
+ ],
+ ]
+
+ form_data = {
+ "model": model_id,
+ "messages": [
+ system_message,
+ {"role": "user", "content": content},
+ ],
+ "stream": False,
+ }
+
+ res = await generate_chat_completion(
+ request,
+ form_data=form_data,
+ user=user,
+ )
+
+ if res:
+ await update_message_by_id(
+ channel.id,
+ response_message.id,
+ MessageForm(
+ **{
+ "content": res["choices"][0]["message"]["content"],
+ "meta": {
+ "done": True,
+ },
+ }
+ ),
+ user,
+ )
+ except Exception as e:
+ log.info(e)
+ pass
+
+ return True
+
+
+async def new_message_handler(
+ request: Request, id: str, form_data: MessageForm, user=Depends(get_verified_user)
):
channel = Channels.get_channel_by_id(id)
if not channel:
@@ -237,7 +398,7 @@ async def post_new_message(
)
if user.role != "admin" and not has_access(
- user.id, type="read", access_control=channel.access_control
+ user.id, type="write", access_control=channel.access_control, strict=False
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
@@ -302,11 +463,30 @@ async def post_new_message(
},
to=f"channel:{channel.id}",
)
+ return MessageModel(**message.model_dump()), channel
+ except Exception as e:
+ log.exception(e)
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
+ )
- active_user_ids = get_user_ids_from_room(f"channel:{channel.id}")
- background_tasks.add_task(
- send_notification,
+@router.post("/{id}/messages/post", response_model=Optional[MessageModel])
+async def post_new_message(
+ request: Request,
+ id: str,
+ form_data: MessageForm,
+ background_tasks: BackgroundTasks,
+ user=Depends(get_verified_user),
+):
+
+ try:
+ message, channel = await new_message_handler(request, id, form_data, user)
+ active_user_ids = get_user_ids_from_room(f"channel:{channel.id}")
+
+ async def background_handler():
+ await model_response_handler(request, channel, message, user)
+ await send_notification(
request.app.state.WEBUI_NAME,
request.app.state.config.WEBUI_URL,
channel,
@@ -314,7 +494,12 @@ async def post_new_message(
active_user_ids,
)
- return MessageModel(**message.model_dump())
+ background_tasks.add_task(background_handler)
+
+ return message
+
+ except HTTPException as e:
+ raise e
except Exception as e:
log.exception(e)
raise HTTPException(
@@ -509,7 +694,7 @@ async def add_reaction_to_message(
)
if user.role != "admin" and not has_access(
- user.id, type="read", access_control=channel.access_control
+ user.id, type="write", access_control=channel.access_control, strict=False
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
@@ -575,7 +760,7 @@ async def remove_reaction_by_id_and_user_id_and_name(
)
if user.role != "admin" and not has_access(
- user.id, type="read", access_control=channel.access_control
+ user.id, type="write", access_control=channel.access_control, strict=False
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
@@ -657,7 +842,9 @@ async def delete_message_by_id(
if (
user.role != "admin"
and message.user_id != user.id
- and not has_access(user.id, type="read", access_control=channel.access_control)
+ and not has_access(
+ user.id, type="write", access_control=channel.access_control, strict=False
+ )
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
diff --git a/backend/open_webui/routers/chats.py b/backend/open_webui/routers/chats.py
index 6f853ab266..788e355f2b 100644
--- a/backend/open_webui/routers/chats.py
+++ b/backend/open_webui/routers/chats.py
@@ -37,7 +37,9 @@ router = APIRouter()
@router.get("/", response_model=list[ChatTitleIdResponse])
@router.get("/list", response_model=list[ChatTitleIdResponse])
def get_session_user_chat_list(
- user=Depends(get_verified_user), page: Optional[int] = None
+ user=Depends(get_verified_user),
+ page: Optional[int] = None,
+ include_folders: Optional[bool] = False,
):
try:
if page is not None:
@@ -45,10 +47,12 @@ def get_session_user_chat_list(
skip = (page - 1) * limit
return Chats.get_chat_title_id_list_by_user_id(
- user.id, skip=skip, limit=limit
+ user.id, include_folders=include_folders, skip=skip, limit=limit
)
else:
- return Chats.get_chat_title_id_list_by_user_id(user.id)
+ return Chats.get_chat_title_id_list_by_user_id(
+ user.id, include_folders=include_folders
+ )
except Exception as e:
log.exception(e)
raise HTTPException(
@@ -166,7 +170,7 @@ async def import_chat(form_data: ChatImportForm, user=Depends(get_verified_user)
@router.get("/search", response_model=list[ChatTitleIdResponse])
-async def search_user_chats(
+def search_user_chats(
text: str, page: Optional[int] = None, user=Depends(get_verified_user)
):
if page is None:
diff --git a/backend/open_webui/routers/configs.py b/backend/open_webui/routers/configs.py
index 8ce4e0d247..d4b88032e2 100644
--- a/backend/open_webui/routers/configs.py
+++ b/backend/open_webui/routers/configs.py
@@ -1,5 +1,7 @@
+import logging
from fastapi import APIRouter, Depends, Request, HTTPException
from pydantic import BaseModel, ConfigDict
+import aiohttp
from typing import Optional
@@ -12,10 +14,24 @@ from open_webui.utils.tools import (
get_tool_server_url,
set_tool_servers,
)
+from open_webui.utils.mcp.client import MCPClient
+from open_webui.env import SRC_LOG_LEVELS
+
+from open_webui.utils.oauth import (
+ get_discovery_urls,
+ get_oauth_client_info_with_dynamic_client_registration,
+ encrypt_data,
+ decrypt_data,
+ OAuthClientInformationFull,
+)
+from mcp.shared.auth import OAuthMetadata
router = APIRouter()
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
+
############################
# ImportConfig
@@ -79,6 +95,43 @@ async def set_connections_config(
}
+class OAuthClientRegistrationForm(BaseModel):
+ url: str
+ client_id: str
+ client_name: Optional[str] = None
+
+
+@router.post("/oauth/clients/register")
+async def register_oauth_client(
+ request: Request,
+ form_data: OAuthClientRegistrationForm,
+ type: Optional[str] = None,
+ user=Depends(get_admin_user),
+):
+ try:
+ oauth_client_id = form_data.client_id
+ if type:
+ oauth_client_id = f"{type}:{form_data.client_id}"
+
+ oauth_client_info = (
+ await get_oauth_client_info_with_dynamic_client_registration(
+ request, oauth_client_id, form_data.url
+ )
+ )
+ return {
+ "status": True,
+ "oauth_client_info": encrypt_data(
+ oauth_client_info.model_dump(mode="json")
+ ),
+ }
+ except Exception as e:
+ log.debug(f"Failed to register OAuth client: {e}")
+ raise HTTPException(
+ status_code=400,
+ detail=f"Failed to register OAuth client",
+ )
+
+
############################
# ToolServers Config
############################
@@ -87,6 +140,7 @@ async def set_connections_config(
class ToolServerConnection(BaseModel):
url: str
path: str
+ type: Optional[str] = "openapi" # openapi, mcp
auth_type: Optional[str]
key: Optional[str]
config: Optional[dict]
@@ -114,8 +168,29 @@ async def set_tool_servers_config(
request.app.state.config.TOOL_SERVER_CONNECTIONS = [
connection.model_dump() for connection in form_data.TOOL_SERVER_CONNECTIONS
]
+
await set_tool_servers(request)
+ for connection in request.app.state.config.TOOL_SERVER_CONNECTIONS:
+ server_type = connection.get("type", "openapi")
+ if server_type == "mcp":
+ server_id = connection.get("info", {}).get("id")
+ auth_type = connection.get("auth_type", "none")
+ if auth_type == "oauth_2.1" and server_id:
+ try:
+ oauth_client_info = connection.get("info", {}).get(
+ "oauth_client_info", ""
+ )
+ oauth_client_info = decrypt_data(oauth_client_info)
+
+ await request.app.state.oauth_client_manager.add_client(
+ f"{server_type}:{server_id}",
+ OAuthClientInformationFull(**oauth_client_info),
+ )
+ except Exception as e:
+ log.debug(f"Failed to add OAuth client for MCP tool server: {e}")
+ continue
+
return {
"TOOL_SERVER_CONNECTIONS": request.app.state.config.TOOL_SERVER_CONNECTIONS,
}
@@ -129,19 +204,105 @@ async def verify_tool_servers_config(
Verify the connection to the tool server.
"""
try:
+ if form_data.type == "mcp":
+ if form_data.auth_type == "oauth_2.1":
+ discovery_urls = get_discovery_urls(form_data.url)
+ async with aiohttp.ClientSession() as session:
+ async with session.get(
+ discovery_urls[0]
+ ) as oauth_server_metadata_response:
+ if oauth_server_metadata_response.status != 200:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Failed to fetch OAuth 2.1 discovery document from {discovery_urls[0]}",
+ )
- token = None
- if form_data.auth_type == "bearer":
- token = form_data.key
- elif form_data.auth_type == "session":
- token = request.state.token.credentials
+ try:
+ oauth_server_metadata = OAuthMetadata.model_validate(
+ await oauth_server_metadata_response.json()
+ )
+ return {
+ "status": True,
+ "oauth_server_metadata": oauth_server_metadata.model_dump(
+ mode="json"
+ ),
+ }
+ except Exception as e:
+ log.info(
+ f"Failed to parse OAuth 2.1 discovery document: {e}"
+ )
+ raise HTTPException(
+ status_code=400,
+ detail=f"Failed to parse OAuth 2.1 discovery document from {discovery_urls[0]}",
+ )
- url = get_tool_server_url(form_data.url, form_data.path)
- return await get_tool_server_data(token, url)
+ raise HTTPException(
+ status_code=400,
+ detail=f"Failed to fetch OAuth 2.1 discovery document from {discovery_urls[0]}",
+ )
+ else:
+ try:
+ client = MCPClient()
+ headers = None
+
+ token = None
+ if form_data.auth_type == "bearer":
+ token = form_data.key
+ elif form_data.auth_type == "session":
+ token = request.state.token.credentials
+ elif form_data.auth_type == "system_oauth":
+ try:
+ if request.cookies.get("oauth_session_id", None):
+ token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
+ except Exception as e:
+ pass
+
+ if token:
+ headers = {"Authorization": f"Bearer {token}"}
+
+ await client.connect(form_data.url, headers=headers)
+ specs = await client.list_tool_specs()
+ return {
+ "status": True,
+ "specs": specs,
+ }
+ except Exception as e:
+ log.debug(f"Failed to create MCP client: {e}")
+ raise HTTPException(
+ status_code=400,
+ detail=f"Failed to create MCP client",
+ )
+ finally:
+ if client:
+ await client.disconnect()
+ else: # openapi
+ token = None
+ if form_data.auth_type == "bearer":
+ token = form_data.key
+ elif form_data.auth_type == "session":
+ token = request.state.token.credentials
+ elif form_data.auth_type == "system_oauth":
+ try:
+ if request.cookies.get("oauth_session_id", None):
+ token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
+ except Exception as e:
+ pass
+
+ url = get_tool_server_url(form_data.url, form_data.path)
+ return await get_tool_server_data(token, url)
+ except HTTPException as e:
+ raise e
except Exception as e:
+ log.debug(f"Failed to connect to the tool server: {e}")
raise HTTPException(
status_code=400,
- detail=f"Failed to connect to the tool server: {str(e)}",
+ detail=f"Failed to connect to the tool server",
)
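For context, here is a hedged example of a single MCP entry in TOOL_SERVER_CONNECTIONS as the registration loop above expects it; only the key names come from the ToolServerConnection model and the lookups above, every value is a placeholder.

```python
# Sketch of one entry in request.app.state.config.TOOL_SERVER_CONNECTIONS.
mcp_connection = {
    "url": "https://mcp.example.com",   # streamable HTTP MCP endpoint (placeholder)
    "path": "",                         # used for OpenAPI servers; placeholder here
    "type": "mcp",                      # new field; defaults to "openapi" when absent
    "auth_type": "oauth_2.1",           # the loop above only registers clients for this value
    "key": None,
    "config": {},
    "info": {
        "id": "my-mcp-server",                    # registered as OAuth client "mcp:my-mcp-server"
        "oauth_client_info": "<encrypted blob>",  # produced by the /oauth/clients/register endpoint above
    },
}
```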
diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py
index 778fbdec27..84d8f841cf 100644
--- a/backend/open_webui/routers/files.py
+++ b/backend/open_webui/routers/files.py
@@ -120,11 +120,6 @@ def process_uploaded_file(request, file, file_path, file_item, file_metadata, us
f"File type {file.content_type} is not provided, but trying to process anyway"
)
process_file(request, ProcessFileForm(file_id=file_item.id), user=user)
-
- Files.update_file_data_by_id(
- file_item.id,
- {"status": "completed"},
- )
except Exception as e:
log.error(f"Error processing file: {file_item.id}")
Files.update_file_data_by_id(
diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py
index 36dbfee5c5..ddee71ea4d 100644
--- a/backend/open_webui/routers/folders.py
+++ b/backend/open_webui/routers/folders.py
@@ -262,15 +262,15 @@ async def update_folder_is_expanded_by_id(
async def delete_folder_by_id(
request: Request, id: str, user=Depends(get_verified_user)
):
- chat_delete_permission = has_permission(
- user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS
- )
-
- if user.role != "admin" and not chat_delete_permission:
- raise HTTPException(
- status_code=status.HTTP_403_FORBIDDEN,
- detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+ if Chats.count_chats_by_folder_id_and_user_id(id, user.id):
+ chat_delete_permission = has_permission(
+ user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS
)
+ if user.role != "admin" and not chat_delete_permission:
+ raise HTTPException(
+ status_code=status.HTTP_403_FORBIDDEN,
+ detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
+ )
folder = Folders.get_folder_by_id_and_user_id(id, user.id)
if folder:
diff --git a/backend/open_webui/routers/functions.py b/backend/open_webui/routers/functions.py
index 9ef6915709..c36e656d5f 100644
--- a/backend/open_webui/routers/functions.py
+++ b/backend/open_webui/routers/functions.py
@@ -148,6 +148,18 @@ async def sync_functions(
content=function.content,
)
+ if hasattr(function_module, "Valves") and function.valves:
+ Valves = function_module.Valves
+ try:
+ Valves(
+ **{k: v for k, v in function.valves.items() if v is not None}
+ )
+ except Exception as e:
+ log.exception(
+ f"Error validating valves for function {function.id}: {e}"
+ )
+ raise e
+
return Functions.sync_functions(user.id, form_data.functions)
except Exception as e:
log.exception(f"Failed to load a function: {e}")
@@ -192,6 +204,9 @@ async def create_new_function(
function_cache_dir = CACHE_DIR / "functions" / form_data.id
function_cache_dir.mkdir(parents=True, exist_ok=True)
+ if function_type == "filter" and getattr(function_module, "toggle", None):
+ Functions.update_function_metadata_by_id(id, {"toggle": True})
+
if function:
return function
else:
@@ -308,6 +323,9 @@ async def update_function_by_id(
function = Functions.update_function_by_id(id, updated)
+ if function_type == "filter" and getattr(function_module, "toggle", None):
+ Functions.update_function_metadata_by_id(id, {"toggle": True})
+
if function:
return function
else:
@@ -413,8 +431,10 @@ async def update_function_valves_by_id(
try:
form_data = {k: v for k, v in form_data.items() if v is not None}
valves = Valves(**form_data)
- Functions.update_function_valves_by_id(id, valves.model_dump())
- return valves.model_dump()
+
+ valves_dict = valves.model_dump(exclude_unset=True)
+ Functions.update_function_valves_by_id(id, valves_dict)
+ return valves_dict
except Exception as e:
log.exception(f"Error updating function values by id {id}: {e}")
raise HTTPException(
@@ -496,10 +516,11 @@ async def update_function_user_valves_by_id(
try:
form_data = {k: v for k, v in form_data.items() if v is not None}
user_valves = UserValves(**form_data)
+ user_valves_dict = user_valves.model_dump(exclude_unset=True)
Functions.update_user_valves_by_id_and_user_id(
- id, user.id, user_valves.model_dump()
+ id, user.id, user_valves_dict
)
- return user_valves.model_dump()
+ return user_valves_dict
except Exception as e:
log.exception(f"Error updating function user valves by id {id}: {e}")
raise HTTPException(
diff --git a/backend/open_webui/routers/images.py b/backend/open_webui/routers/images.py
index 802a3e9924..059b3a23d7 100644
--- a/backend/open_webui/routers/images.py
+++ b/backend/open_webui/routers/images.py
@@ -514,6 +514,7 @@ async def image_generations(
size = form_data.size
width, height = tuple(map(int, size.split("x")))
+ model = get_image_model(request)
r = None
try:
@@ -531,11 +532,7 @@ async def image_generations(
headers["X-OpenWebUI-User-Role"] = user.role
data = {
- "model": (
- request.app.state.config.IMAGE_GENERATION_MODEL
- if request.app.state.config.IMAGE_GENERATION_MODEL != ""
- else "dall-e-2"
- ),
+ "model": model,
"prompt": form_data.prompt,
"n": form_data.n,
"size": (
@@ -584,7 +581,6 @@ async def image_generations(
headers["Content-Type"] = "application/json"
headers["x-goog-api-key"] = request.app.state.config.IMAGES_GEMINI_API_KEY
- model = get_image_model(request)
data = {
"instances": {"prompt": form_data.prompt},
"parameters": {
@@ -640,7 +636,7 @@ async def image_generations(
}
)
res = await comfyui_generate_image(
- request.app.state.config.IMAGE_GENERATION_MODEL,
+ model,
form_data,
user.id,
request.app.state.config.COMFYUI_BASE_URL,
diff --git a/backend/open_webui/routers/models.py b/backend/open_webui/routers/models.py
index a4d4e3668e..05d7c68006 100644
--- a/backend/open_webui/routers/models.py
+++ b/backend/open_webui/routers/models.py
@@ -1,4 +1,6 @@
from typing import Optional
+import io
+import base64
from open_webui.models.models import (
ModelForm,
@@ -10,12 +12,13 @@ from open_webui.models.models import (
from pydantic import BaseModel
from open_webui.constants import ERROR_MESSAGES
-from fastapi import APIRouter, Depends, HTTPException, Request, status
+from fastapi import APIRouter, Depends, HTTPException, Request, status, Response
+from fastapi.responses import FileResponse, StreamingResponse
from open_webui.utils.auth import get_admin_user, get_verified_user
from open_webui.utils.access_control import has_access, has_permission
-from open_webui.config import BYPASS_ADMIN_ACCESS_CONTROL
+from open_webui.config import BYPASS_ADMIN_ACCESS_CONTROL, STATIC_DIR
router = APIRouter()
@@ -129,6 +132,39 @@ async def get_model_by_id(id: str, user=Depends(get_verified_user)):
)
+###########################
+# GetModelProfileImage
+###########################
+
+
+@router.get("/model/profile/image")
+async def get_model_profile_image(id: str, user=Depends(get_verified_user)):
+ model = Models.get_model_by_id(id)
+ if model:
+ if model.meta.profile_image_url:
+ if model.meta.profile_image_url.startswith("http"):
+ return Response(
+ status_code=status.HTTP_302_FOUND,
+ headers={"Location": model.meta.profile_image_url},
+ )
+ elif model.meta.profile_image_url.startswith("data:image"):
+ try:
+ header, base64_data = model.meta.profile_image_url.split(",", 1)
+ image_data = base64.b64decode(base64_data)
+ image_buffer = io.BytesIO(image_data)
+
+ return StreamingResponse(
+ image_buffer,
+ media_type="image/png",
+ headers={"Content-Disposition": "inline; filename=image.png"},
+ )
+ except Exception as e:
+ pass
+ return FileResponse(f"{STATIC_DIR}/favicon.png")
+ else:
+ return FileResponse(f"{STATIC_DIR}/favicon.png")
+
+
############################
# ToggleModelById
############################
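A hedged example of calling the new model avatar endpoint; the /api/v1/models router prefix is an assumption, while the query parameter and fallback behaviour come from the handler above.

```python
import requests

BASE_URL = "http://localhost:8080"                  # placeholder deployment URL
resp = requests.get(
    f"{BASE_URL}/api/v1/models/model/profile/image",
    params={"id": "my-model-id"},                   # placeholder model id
    headers={"Authorization": "Bearer <user-token>"},
    allow_redirects=False,
)
# Expect 302 for http(s) profile URLs, a PNG stream for data: URLs, favicon.png otherwise.
print(resp.status_code, resp.headers.get("content-type"))
```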
diff --git a/backend/open_webui/routers/notes.py b/backend/open_webui/routers/notes.py
index 375f59ff6c..0c420e4f12 100644
--- a/backend/open_webui/routers/notes.py
+++ b/backend/open_webui/routers/notes.py
@@ -48,7 +48,7 @@ async def get_notes(request: Request, user=Depends(get_verified_user)):
"user": UserResponse(**Users.get_user_by_id(note.user_id).model_dump()),
}
)
- for note in Notes.get_notes_by_user_id(user.id, "write")
+ for note in Notes.get_notes_by_permission(user.id, "write")
]
return notes
@@ -62,8 +62,9 @@ class NoteTitleIdResponse(BaseModel):
@router.get("/list", response_model=list[NoteTitleIdResponse])
-async def get_note_list(request: Request, user=Depends(get_verified_user)):
-
+async def get_note_list(
+ request: Request, page: Optional[int] = None, user=Depends(get_verified_user)
+):
if user.role != "admin" and not has_permission(
user.id, "features.notes", request.app.state.config.USER_PERMISSIONS
):
@@ -72,9 +73,17 @@ async def get_note_list(request: Request, user=Depends(get_verified_user)):
detail=ERROR_MESSAGES.UNAUTHORIZED,
)
+ limit = None
+ skip = None
+ if page is not None:
+ limit = 60
+ skip = (page - 1) * limit
+
notes = [
NoteTitleIdResponse(**note.model_dump())
- for note in Notes.get_notes_by_user_id(user.id, "write")
+ for note in Notes.get_notes_by_permission(
+ user.id, "write", skip=skip, limit=limit
+ )
]
return notes
diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py
index 8dadf3523a..bf11ffa0dd 100644
--- a/backend/open_webui/routers/ollama.py
+++ b/backend/open_webui/routers/ollama.py
@@ -1694,25 +1694,27 @@ async def download_file_stream(
yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
if done:
- file.seek(0)
- chunk_size = 1024 * 1024 * 2
- hashed = calculate_sha256(file, chunk_size)
- file.seek(0)
+ file.close()
- url = f"{ollama_url}/api/blobs/sha256:{hashed}"
- response = requests.post(url, data=file)
+ with open(file_path, "rb") as file:
+ chunk_size = 1024 * 1024 * 2
+ hashed = calculate_sha256(file, chunk_size)
- if response.ok:
- res = {
- "done": done,
- "blob": f"sha256:{hashed}",
- "name": file_name,
- }
- os.remove(file_path)
+ url = f"{ollama_url}/api/blobs/sha256:{hashed}"
+ with requests.Session() as session:
+ response = session.post(url, data=file, timeout=30)
- yield f"data: {json.dumps(res)}\n\n"
- else:
- raise "Ollama: Could not create blob, Please try again."
+ if response.ok:
+ res = {
+ "done": done,
+ "blob": f"sha256:{hashed}",
+ "name": file_name,
+ }
+ os.remove(file_path)
+
+ yield f"data: {json.dumps(res)}\n\n"
+ else:
+ raise "Ollama: Could not create blob, Please try again."
# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 184f47038d..e8865b90a0 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -9,6 +9,8 @@ from aiocache import cached
import requests
from urllib.parse import quote
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
from fastapi import Depends, HTTPException, Request, APIRouter
from fastapi.responses import (
FileResponse,
@@ -119,7 +121,7 @@ def openai_reasoning_model_handler(payload):
return payload
-def get_headers_and_cookies(
+async def get_headers_and_cookies(
request: Request,
url,
key=None,
@@ -171,22 +173,41 @@ def get_headers_and_cookies(
oauth_token = None
try:
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
- user.id,
- request.cookies.get("oauth_session_id", None),
- )
+ if request.cookies.get("oauth_session_id", None):
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
except Exception as e:
log.error(f"Error getting OAuth token: {e}")
if oauth_token:
token = f"{oauth_token.get('access_token', '')}"
+ elif auth_type in ("azure_ad", "microsoft_entra_id"):
+ token = get_microsoft_entra_id_access_token()
+
if token:
headers["Authorization"] = f"Bearer {token}"
return headers, cookies
+def get_microsoft_entra_id_access_token():
+ """
+ Get Microsoft Entra ID access token using DefaultAzureCredential for Azure OpenAI.
+ Returns the token string or None if authentication fails.
+ """
+ try:
+ token_provider = get_bearer_token_provider(
+ DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+ )
+ return token_provider()
+ except Exception as e:
+ log.error(f"Error getting Microsoft Entra ID access token: {e}")
+ return None
+
+
##########################################
#
# API routes
@@ -284,7 +305,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
request.app.state.config.OPENAI_API_CONFIGS.get(url, {}), # Legacy support
)
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
@@ -549,7 +570,7 @@ async def get_models(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
@@ -635,14 +656,17 @@ async def verify_connection(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
if api_config.get("azure", False):
- headers["api-key"] = key
- api_version = api_config.get("api_version", "") or "2023-03-15-preview"
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+ api_version = api_config.get("api_version", "") or "2023-03-15-preview"
async with session.get(
url=f"{url}/openai/models?api-version={api_version}",
headers=headers,
@@ -877,14 +901,19 @@ async def generate_chat_completion(
convert_logit_bias_input_to_json(payload["logit_bias"])
)
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, metadata, user=user
)
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
request_url, payload = convert_to_azure_payload(url, payload, api_version)
- headers["api-key"] = key
+
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+
headers["api-version"] = api_version
request_url = f"{request_url}/chat/completions?api-version={api_version}"
else:
@@ -981,7 +1010,9 @@ async def embeddings(request: Request, form_data: dict, user):
session = None
streaming = False
- headers, cookies = get_headers_and_cookies(request, url, key, api_config, user=user)
+ headers, cookies = await get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
try:
session = aiohttp.ClientSession(trust_env=True)
r = await session.request(
@@ -1051,13 +1082,18 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
streaming = False
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
- headers["api-key"] = key
+
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+
headers["api-version"] = api_version
payload = json.loads(body)
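To tie the Azure changes above together, a hedged sketch of an OPENAI_API_CONFIGS entry that opts into Entra ID authentication; only the keys read by the code above are shown, and the values are placeholders.

```python
# Per-connection config consulted by get_headers_and_cookies() and the Azure branches above.
azure_api_config = {
    "azure": True,
    "auth_type": "microsoft_entra_id",   # or "azure_ad"; any other value keeps the api-key header
    "api_version": "2023-03-15-preview", # default used above when unset
}
# With this auth_type, the request is authenticated via get_microsoft_entra_id_access_token()
# (DefaultAzureCredential) and an "Authorization: Bearer <token>" header instead of "api-key".
```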
diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py
index dd5e2d5bc4..3681008c87 100644
--- a/backend/open_webui/routers/retrieval.py
+++ b/backend/open_webui/routers/retrieval.py
@@ -45,6 +45,8 @@ from open_webui.retrieval.loaders.youtube import YoutubeLoader
# Web search engines
from open_webui.retrieval.web.main import SearchResult
from open_webui.retrieval.web.utils import get_web_loader
+from open_webui.retrieval.web.ollama import search_ollama_cloud
+from open_webui.retrieval.web.perplexity_search import search_perplexity_search
from open_webui.retrieval.web.brave import search_brave
from open_webui.retrieval.web.kagi import search_kagi
from open_webui.retrieval.web.mojeek import search_mojeek
@@ -469,6 +471,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
"WEB_SEARCH_DOMAIN_FILTER_LIST": request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST,
"BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL,
"BYPASS_WEB_SEARCH_WEB_LOADER": request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER,
+ "OLLAMA_CLOUD_WEB_SEARCH_API_KEY": request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY,
"SEARXNG_QUERY_URL": request.app.state.config.SEARXNG_QUERY_URL,
"YACY_QUERY_URL": request.app.state.config.YACY_QUERY_URL,
"YACY_USERNAME": request.app.state.config.YACY_USERNAME,
@@ -525,6 +528,7 @@ class WebConfig(BaseModel):
WEB_SEARCH_DOMAIN_FILTER_LIST: Optional[List[str]] = []
BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None
BYPASS_WEB_SEARCH_WEB_LOADER: Optional[bool] = None
+ OLLAMA_CLOUD_WEB_SEARCH_API_KEY: Optional[str] = None
SEARXNG_QUERY_URL: Optional[str] = None
YACY_QUERY_URL: Optional[str] = None
YACY_USERNAME: Optional[str] = None
@@ -988,6 +992,9 @@ async def update_rag_config(
request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER = (
form_data.web.BYPASS_WEB_SEARCH_WEB_LOADER
)
+ request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY = (
+ form_data.web.OLLAMA_CLOUD_WEB_SEARCH_API_KEY
+ )
request.app.state.config.SEARXNG_QUERY_URL = form_data.web.SEARXNG_QUERY_URL
request.app.state.config.YACY_QUERY_URL = form_data.web.YACY_QUERY_URL
request.app.state.config.YACY_USERNAME = form_data.web.YACY_USERNAME
@@ -1139,6 +1146,7 @@ async def update_rag_config(
"WEB_SEARCH_DOMAIN_FILTER_LIST": request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST,
"BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL,
"BYPASS_WEB_SEARCH_WEB_LOADER": request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER,
+ "OLLAMA_CLOUD_WEB_SEARCH_API_KEY": request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY,
"SEARXNG_QUERY_URL": request.app.state.config.SEARXNG_QUERY_URL,
"YACY_QUERY_URL": request.app.state.config.YACY_QUERY_URL,
"YACY_USERNAME": request.app.state.config.YACY_USERNAME,
@@ -1334,7 +1342,7 @@ def save_docs_to_vector_db(
)
return True
- log.info(f"adding to collection {collection_name}")
+ log.info(f"generating embeddings for {collection_name}")
embedding_function = get_embedding_function(
request.app.state.config.RAG_EMBEDDING_ENGINE,
request.app.state.config.RAG_EMBEDDING_MODEL,
@@ -1370,6 +1378,7 @@ def save_docs_to_vector_db(
prefix=RAG_EMBEDDING_CONTENT_PREFIX,
user=user,
)
+ log.info(f"embeddings generated {len(embeddings)} for {len(texts)} items")
items = [
{
@@ -1381,11 +1390,13 @@ def save_docs_to_vector_db(
for idx, text in enumerate(texts)
]
+ log.info(f"adding to collection {collection_name}")
VECTOR_DB_CLIENT.insert(
collection_name=collection_name,
items=items,
)
+ log.info(f"added {len(items)} items to collection {collection_name}")
return True
except Exception as e:
log.exception(e)
@@ -1404,203 +1415,228 @@ def process_file(
form_data: ProcessFileForm,
user=Depends(get_verified_user),
):
- try:
+ if user.role == "admin":
file = Files.get_file_by_id(form_data.file_id)
+ else:
+ file = Files.get_file_by_id_and_user_id(form_data.file_id, user.id)
- collection_name = form_data.collection_name
+ if file:
+ try:
- if collection_name is None:
- collection_name = f"file-{file.id}"
+ collection_name = form_data.collection_name
- if form_data.content:
- # Update the content in the file
- # Usage: /files/{file_id}/data/content/update, /files/ (audio file upload pipeline)
+ if collection_name is None:
+ collection_name = f"file-{file.id}"
- try:
- # /files/{file_id}/data/content/update
- VECTOR_DB_CLIENT.delete_collection(collection_name=f"file-{file.id}")
- except:
- # Audio file upload pipeline
- pass
+ if form_data.content:
+ # Update the content in the file
+ # Usage: /files/{file_id}/data/content/update, /files/ (audio file upload pipeline)
- docs = [
- Document(
- page_content=form_data.content.replace("<br/>", "\n"),
- metadata={
- **file.meta,
- "name": file.filename,
- "created_by": file.user_id,
- "file_id": file.id,
- "source": file.filename,
- },
+ try:
+ # /files/{file_id}/data/content/update
+ VECTOR_DB_CLIENT.delete_collection(
+ collection_name=f"file-{file.id}"
+ )
+ except:
+ # Audio file upload pipeline
+ pass
+
+ docs = [
+ Document(
+ page_content=form_data.content.replace("<br/>", "\n"),
+ metadata={
+ **file.meta,
+ "name": file.filename,
+ "created_by": file.user_id,
+ "file_id": file.id,
+ "source": file.filename,
+ },
+ )
+ ]
+
+ text_content = form_data.content
+ elif form_data.collection_name:
+ # Check if the file has already been processed and save the content
+ # Usage: /knowledge/{id}/file/add, /knowledge/{id}/file/update
+
+ result = VECTOR_DB_CLIENT.query(
+ collection_name=f"file-{file.id}", filter={"file_id": file.id}
)
- ]
- text_content = form_data.content
- elif form_data.collection_name:
- # Check if the file has already been processed and save the content
- # Usage: /knowledge/{id}/file/add, /knowledge/{id}/file/update
+ if result is not None and len(result.ids[0]) > 0:
+ docs = [
+ Document(
+ page_content=result.documents[0][idx],
+ metadata=result.metadatas[0][idx],
+ )
+ for idx, id in enumerate(result.ids[0])
+ ]
+ else:
+ docs = [
+ Document(
+ page_content=file.data.get("content", ""),
+ metadata={
+ **file.meta,
+ "name": file.filename,
+ "created_by": file.user_id,
+ "file_id": file.id,
+ "source": file.filename,
+ },
+ )
+ ]
- result = VECTOR_DB_CLIENT.query(
- collection_name=f"file-{file.id}", filter={"file_id": file.id}
+ text_content = file.data.get("content", "")
+ else:
+ # Process the file and save the content
+ # Usage: /files/
+ file_path = file.path
+ if file_path:
+ file_path = Storage.get_file(file_path)
+ loader = Loader(
+ engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE,
+ DATALAB_MARKER_API_KEY=request.app.state.config.DATALAB_MARKER_API_KEY,
+ DATALAB_MARKER_API_BASE_URL=request.app.state.config.DATALAB_MARKER_API_BASE_URL,
+ DATALAB_MARKER_ADDITIONAL_CONFIG=request.app.state.config.DATALAB_MARKER_ADDITIONAL_CONFIG,
+ DATALAB_MARKER_SKIP_CACHE=request.app.state.config.DATALAB_MARKER_SKIP_CACHE,
+ DATALAB_MARKER_FORCE_OCR=request.app.state.config.DATALAB_MARKER_FORCE_OCR,
+ DATALAB_MARKER_PAGINATE=request.app.state.config.DATALAB_MARKER_PAGINATE,
+ DATALAB_MARKER_STRIP_EXISTING_OCR=request.app.state.config.DATALAB_MARKER_STRIP_EXISTING_OCR,
+ DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION=request.app.state.config.DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION,
+ DATALAB_MARKER_FORMAT_LINES=request.app.state.config.DATALAB_MARKER_FORMAT_LINES,
+ DATALAB_MARKER_USE_LLM=request.app.state.config.DATALAB_MARKER_USE_LLM,
+ DATALAB_MARKER_OUTPUT_FORMAT=request.app.state.config.DATALAB_MARKER_OUTPUT_FORMAT,
+ EXTERNAL_DOCUMENT_LOADER_URL=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_URL,
+ EXTERNAL_DOCUMENT_LOADER_API_KEY=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY,
+ TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL,
+ DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL,
+ DOCLING_PARAMS={
+ "do_ocr": request.app.state.config.DOCLING_DO_OCR,
+ "force_ocr": request.app.state.config.DOCLING_FORCE_OCR,
+ "ocr_engine": request.app.state.config.DOCLING_OCR_ENGINE,
+ "ocr_lang": request.app.state.config.DOCLING_OCR_LANG,
+ "pdf_backend": request.app.state.config.DOCLING_PDF_BACKEND,
+ "table_mode": request.app.state.config.DOCLING_TABLE_MODE,
+ "pipeline": request.app.state.config.DOCLING_PIPELINE,
+ "do_picture_description": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION,
+ "picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE,
+ "picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL,
+ "picture_description_api": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_API,
+ },
+ PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES,
+ DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
+ DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
+ MISTRAL_OCR_API_KEY=request.app.state.config.MISTRAL_OCR_API_KEY,
+ )
+ docs = loader.load(
+ file.filename, file.meta.get("content_type"), file_path
+ )
+
+ docs = [
+ Document(
+ page_content=doc.page_content,
+ metadata={
+ **doc.metadata,
+ "name": file.filename,
+ "created_by": file.user_id,
+ "file_id": file.id,
+ "source": file.filename,
+ },
+ )
+ for doc in docs
+ ]
+ else:
+ docs = [
+ Document(
+ page_content=file.data.get("content", ""),
+ metadata={
+ **file.meta,
+ "name": file.filename,
+ "created_by": file.user_id,
+ "file_id": file.id,
+ "source": file.filename,
+ },
+ )
+ ]
+ text_content = " ".join([doc.page_content for doc in docs])
+
+ log.debug(f"text_content: {text_content}")
+ Files.update_file_data_by_id(
+ file.id,
+ {"content": text_content},
)
+ hash = calculate_sha256_string(text_content)
+ Files.update_file_hash_by_id(file.id, hash)
- if result is not None and len(result.ids[0]) > 0:
- docs = [
- Document(
- page_content=result.documents[0][idx],
- metadata=result.metadatas[0][idx],
- )
- for idx, id in enumerate(result.ids[0])
- ]
+ if request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:
+ Files.update_file_data_by_id(file.id, {"status": "completed"})
+ return {
+ "status": True,
+ "collection_name": None,
+ "filename": file.filename,
+ "content": text_content,
+ }
else:
- docs = [
- Document(
- page_content=file.data.get("content", ""),
+ try:
+ result = save_docs_to_vector_db(
+ request,
+ docs=docs,
+ collection_name=collection_name,
metadata={
- **file.meta,
- "name": file.filename,
- "created_by": file.user_id,
"file_id": file.id,
- "source": file.filename,
- },
- )
- ]
-
- text_content = file.data.get("content", "")
- else:
- # Process the file and save the content
- # Usage: /files/
- file_path = file.path
- if file_path:
- file_path = Storage.get_file(file_path)
- loader = Loader(
- engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE,
- DATALAB_MARKER_API_KEY=request.app.state.config.DATALAB_MARKER_API_KEY,
- DATALAB_MARKER_API_BASE_URL=request.app.state.config.DATALAB_MARKER_API_BASE_URL,
- DATALAB_MARKER_ADDITIONAL_CONFIG=request.app.state.config.DATALAB_MARKER_ADDITIONAL_CONFIG,
- DATALAB_MARKER_SKIP_CACHE=request.app.state.config.DATALAB_MARKER_SKIP_CACHE,
- DATALAB_MARKER_FORCE_OCR=request.app.state.config.DATALAB_MARKER_FORCE_OCR,
- DATALAB_MARKER_PAGINATE=request.app.state.config.DATALAB_MARKER_PAGINATE,
- DATALAB_MARKER_STRIP_EXISTING_OCR=request.app.state.config.DATALAB_MARKER_STRIP_EXISTING_OCR,
- DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION=request.app.state.config.DATALAB_MARKER_DISABLE_IMAGE_EXTRACTION,
- DATALAB_MARKER_FORMAT_LINES=request.app.state.config.DATALAB_MARKER_FORMAT_LINES,
- DATALAB_MARKER_USE_LLM=request.app.state.config.DATALAB_MARKER_USE_LLM,
- DATALAB_MARKER_OUTPUT_FORMAT=request.app.state.config.DATALAB_MARKER_OUTPUT_FORMAT,
- EXTERNAL_DOCUMENT_LOADER_URL=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_URL,
- EXTERNAL_DOCUMENT_LOADER_API_KEY=request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY,
- TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL,
- DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL,
- DOCLING_PARAMS={
- "do_ocr": request.app.state.config.DOCLING_DO_OCR,
- "force_ocr": request.app.state.config.DOCLING_FORCE_OCR,
- "ocr_engine": request.app.state.config.DOCLING_OCR_ENGINE,
- "ocr_lang": request.app.state.config.DOCLING_OCR_LANG,
- "pdf_backend": request.app.state.config.DOCLING_PDF_BACKEND,
- "table_mode": request.app.state.config.DOCLING_TABLE_MODE,
- "pipeline": request.app.state.config.DOCLING_PIPELINE,
- "do_picture_description": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION,
- "picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE,
- "picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL,
- "picture_description_api": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_API,
- },
- PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES,
- DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
- DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
- MISTRAL_OCR_API_KEY=request.app.state.config.MISTRAL_OCR_API_KEY,
- )
- docs = loader.load(
- file.filename, file.meta.get("content_type"), file_path
- )
-
- docs = [
- Document(
- page_content=doc.page_content,
- metadata={
- **doc.metadata,
"name": file.filename,
- "created_by": file.user_id,
- "file_id": file.id,
- "source": file.filename,
+ "hash": hash,
},
+ add=(True if form_data.collection_name else False),
+ user=user,
)
- for doc in docs
- ]
- else:
- docs = [
- Document(
- page_content=file.data.get("content", ""),
- metadata={
- **file.meta,
- "name": file.filename,
- "created_by": file.user_id,
- "file_id": file.id,
- "source": file.filename,
- },
- )
- ]
- text_content = " ".join([doc.page_content for doc in docs])
+ log.info(f"added {len(docs)} items to collection {collection_name}")
- log.debug(f"text_content: {text_content}")
- Files.update_file_data_by_id(
- file.id,
- {"status": "completed", "content": text_content},
- )
+ if result:
+ Files.update_file_metadata_by_id(
+ file.id,
+ {
+ "collection_name": collection_name,
+ },
+ )
- hash = calculate_sha256_string(text_content)
- Files.update_file_hash_by_id(file.id, hash)
+ Files.update_file_data_by_id(
+ file.id,
+ {"status": "completed"},
+ )
- if not request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:
- try:
- result = save_docs_to_vector_db(
- request,
- docs=docs,
- collection_name=collection_name,
- metadata={
- "file_id": file.id,
- "name": file.filename,
- "hash": hash,
- },
- add=(True if form_data.collection_name else False),
- user=user,
- )
-
- if result:
- Files.update_file_metadata_by_id(
- file.id,
- {
+ return {
+ "status": True,
"collection_name": collection_name,
- },
- )
+ "filename": file.filename,
+ "content": text_content,
+ }
+ else:
+ raise Exception("Error saving document to vector database")
+ except Exception as e:
+ raise e
- return {
- "status": True,
- "collection_name": collection_name,
- "filename": file.filename,
- "content": text_content,
- }
- except Exception as e:
- raise e
- else:
- return {
- "status": True,
- "collection_name": None,
- "filename": file.filename,
- "content": text_content,
- }
+ except Exception as e:
+ log.exception(e)
+ Files.update_file_data_by_id(
+ file.id,
+ {"status": "failed"},
+ )
- except Exception as e:
- log.exception(e)
- if "No pandoc was found" in str(e):
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
- )
- else:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=str(e),
- )
+ if "No pandoc was found" in str(e):
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
+ )
+ else:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=str(e),
+ )
+
+ else:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND
+ )
class ProcessTextForm(BaseModel):
@@ -1758,7 +1794,25 @@ def search_web(request: Request, engine: str, query: str) -> list[SearchResult]:
"""
# TODO: add playwright to search the web
- if engine == "searxng":
+ if engine == "ollama_cloud":
+ return search_ollama_cloud(
+ "https://ollama.com",
+ request.app.state.config.OLLAMA_CLOUD_WEB_SEARCH_API_KEY,
+ query,
+ request.app.state.config.WEB_SEARCH_RESULT_COUNT,
+ request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST,
+ )
+ elif engine == "perplexity_search":
+ if request.app.state.config.PERPLEXITY_API_KEY:
+ return search_perplexity_search(
+ request.app.state.config.PERPLEXITY_API_KEY,
+ query,
+ request.app.state.config.WEB_SEARCH_RESULT_COUNT,
+ request.app.state.config.WEB_SEARCH_DOMAIN_FILTER_LIST,
+ )
+ else:
+ raise Exception("No PERPLEXITY_API_KEY found in environment variables")
+ elif engine == "searxng":
if request.app.state.config.SEARXNG_QUERY_URL:
return search_searxng(
request.app.state.config.SEARXNG_QUERY_URL,
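For orientation, the `search_web` hunk above extends a plain engine-name dispatch: each branch checks the credentials or URL it needs and then delegates to an engine-specific search function. Below is a minimal standalone sketch of that pattern; `_search_stub`, the Perplexity base URL, and the `config` dict are placeholders for illustration, not the router's real helpers.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class SearchResult:
    link: str
    title: Optional[str] = None
    snippet: Optional[str] = None


def search_web(engine: str, query: str, config: dict) -> list[SearchResult]:
    # Each branch validates its own prerequisites before delegating,
    # mirroring the dispatch structure in the router code above.
    if engine == "ollama_cloud":
        api_key = config.get("OLLAMA_CLOUD_WEB_SEARCH_API_KEY")
        if not api_key:
            raise Exception("No OLLAMA_CLOUD_WEB_SEARCH_API_KEY configured")
        return _search_stub("https://ollama.com", api_key, query)
    elif engine == "perplexity_search":
        api_key = config.get("PERPLEXITY_API_KEY")
        if not api_key:
            raise Exception("No PERPLEXITY_API_KEY found in environment variables")
        return _search_stub("https://api.perplexity.ai", api_key, query)
    raise Exception(f"Unsupported web search engine: {engine}")


def _search_stub(base_url: str, api_key: str, query: str) -> list[SearchResult]:
    # Placeholder for the provider-specific HTTP call.
    return [SearchResult(link=f"{base_url}/?q={query}")]
```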
diff --git a/backend/open_webui/routers/tools.py b/backend/open_webui/routers/tools.py
index 5f82e7f1bd..eb66a86825 100644
--- a/backend/open_webui/routers/tools.py
+++ b/backend/open_webui/routers/tools.py
@@ -9,6 +9,7 @@ from pydantic import BaseModel, HttpUrl
from fastapi import APIRouter, Depends, HTTPException, Request, status
+from open_webui.models.oauth_sessions import OAuthSessions
from open_webui.models.tools import (
ToolForm,
ToolModel,
@@ -41,8 +42,17 @@ router = APIRouter()
@router.get("/", response_model=list[ToolUserResponse])
async def get_tools(request: Request, user=Depends(get_verified_user)):
- tools = Tools.get_tools()
+ tools = [
+ ToolUserResponse(
+ **{
+ **tool.model_dump(),
+ "has_user_valves": "class UserValves(BaseModel):" in tool.content,
+ }
+ )
+ for tool in Tools.get_tools()
+ ]
+ # OpenAPI Tool Servers
for server in await get_tool_servers(request):
tools.append(
ToolUserResponse(
@@ -68,6 +78,50 @@ async def get_tools(request: Request, user=Depends(get_verified_user)):
)
)
+ # MCP Tool Servers
+ for server in request.app.state.config.TOOL_SERVER_CONNECTIONS:
+ if server.get("type", "openapi") == "mcp":
+ server_id = server.get("info", {}).get("id")
+ auth_type = server.get("auth_type", "none")
+
+ session_token = None
+ if auth_type == "oauth_2.1":
+ splits = server_id.split(":")
+ server_id = splits[-1] if len(splits) > 1 else server_id
+
+ session_token = (
+ await request.app.state.oauth_client_manager.get_oauth_token(
+ user.id, f"mcp:{server_id}"
+ )
+ )
+
+ tools.append(
+ ToolUserResponse(
+ **{
+ "id": f"server:mcp:{server.get('info', {}).get('id')}",
+ "user_id": f"server:mcp:{server.get('info', {}).get('id')}",
+ "name": server.get("info", {}).get("name", "MCP Tool Server"),
+ "meta": {
+ "description": server.get("info", {}).get(
+ "description", ""
+ ),
+ },
+ "access_control": server.get("config", {}).get(
+ "access_control", None
+ ),
+ "updated_at": int(time.time()),
+ "created_at": int(time.time()),
+ **(
+ {
+ "authenticated": session_token is not None,
+ }
+ if auth_type == "oauth_2.1"
+ else {}
+ ),
+ }
+ )
+ )
+
if user.role == "admin" and BYPASS_ADMIN_ACCESS_CONTROL:
# Admin can see all tools
return tools
@@ -462,8 +516,9 @@ async def update_tools_valves_by_id(
try:
form_data = {k: v for k, v in form_data.items() if v is not None}
valves = Valves(**form_data)
- Tools.update_tool_valves_by_id(id, valves.model_dump())
- return valves.model_dump()
+ valves_dict = valves.model_dump(exclude_unset=True)
+ Tools.update_tool_valves_by_id(id, valves_dict)
+ return valves_dict
except Exception as e:
log.exception(f"Failed to update tool valves by id {id}: {e}")
raise HTTPException(
@@ -538,10 +593,11 @@ async def update_tools_user_valves_by_id(
try:
form_data = {k: v for k, v in form_data.items() if v is not None}
user_valves = UserValves(**form_data)
+ user_valves_dict = user_valves.model_dump(exclude_unset=True)
Tools.update_user_valves_by_id_and_user_id(
- id, user.id, user_valves.model_dump()
+ id, user.id, user_valves_dict
)
- return user_valves.model_dump()
+ return user_valves_dict
except Exception as e:
log.exception(f"Failed to update user valves by id {id}: {e}")
raise HTTPException(
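The valves hunks above switch from `model_dump()` to `model_dump(exclude_unset=True)`, so only fields the caller actually submitted get persisted and unset defaults no longer overwrite stored values. A small illustration with a hypothetical `Valves` model:

```python
from typing import Optional
from pydantic import BaseModel


class Valves(BaseModel):
    api_key: Optional[str] = None
    temperature: float = 0.7


# The client only submits "api_key"; "temperature" keeps its default.
form_data = {"api_key": "sk-example"}
valves = Valves(**form_data)

print(valves.model_dump())
# {'api_key': 'sk-example', 'temperature': 0.7}  -> would overwrite a stored temperature

print(valves.model_dump(exclude_unset=True))
# {'api_key': 'sk-example'}  -> persists only what the user actually changed
```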
diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py
index 5b331dce73..9a0f8c6aaf 100644
--- a/backend/open_webui/routers/users.py
+++ b/backend/open_webui/routers/users.py
@@ -18,6 +18,7 @@ from open_webui.models.users import (
UserModel,
UserListResponse,
UserInfoListResponse,
+ UserIdNameListResponse,
UserRoleUpdateForm,
Users,
UserSettings,
@@ -100,6 +101,23 @@ async def get_all_users(
return Users.get_users()
+@router.get("/search", response_model=UserIdNameListResponse)
+async def search_users(
+ query: Optional[str] = None,
+ user=Depends(get_verified_user),
+):
+ limit = PAGE_ITEM_COUNT
+
+ page = 1 # Always return the first page for search
+ skip = (page - 1) * limit
+
+ filter = {}
+ if query:
+ filter["query"] = query
+
+ return Users.get_users(filter=filter, skip=skip, limit=limit)
+
+
############################
# User Groups
############################
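As a usage sketch, the new `/search` route can be called like any other authenticated user endpoint. The base URL, API token, router prefix (`/api/v1/users`), and the exact response shape are assumptions here; check the deployed instance for the real values.

```python
import httpx

BASE_URL = "http://localhost:3000"   # placeholder instance
TOKEN = "sk-your-api-key"            # placeholder credential

resp = httpx.get(
    f"{BASE_URL}/api/v1/users/search",
    params={"query": "alice"},
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
print(resp.json())  # expected to contain id/name pairs, e.g. {"users": [...], "total": 1}
```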
diff --git a/backend/open_webui/utils/access_control.py b/backend/open_webui/utils/access_control.py
index 1529773c44..af48bebfb4 100644
--- a/backend/open_webui/utils/access_control.py
+++ b/backend/open_webui/utils/access_control.py
@@ -110,9 +110,13 @@ def has_access(
type: str = "write",
access_control: Optional[dict] = None,
user_group_ids: Optional[Set[str]] = None,
+ strict: bool = True,
) -> bool:
if access_control is None:
- return type == "read"
+ if strict:
+ return type == "read"
+ else:
+ return True
if user_group_ids is None:
user_groups = Groups.get_groups_by_member_id(user_id)
@@ -130,9 +134,10 @@ def has_access(
# Get all users with access to a resource
def get_users_with_access(
type: str = "write", access_control: Optional[dict] = None
-) -> List[UserModel]:
+) -> list[UserModel]:
if access_control is None:
- return Users.get_users()
+ result = Users.get_users()
+ return result.get("users", [])
permission_access = access_control.get(type, {})
permitted_group_ids = permission_access.get("group_ids", [])
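The new `strict` flag in `has_access` only matters for resources with no `access_control` dict at all: strict mode keeps the old rule that a public resource is readable but not writable, while non-strict mode treats the missing ACL as fully open. A minimal sketch of just that branch, without the group lookups:

```python
from typing import Optional


def public_resource_access(type: str, access_control: Optional[dict], strict: bool = True) -> bool:
    # Models only the `access_control is None` branch from the diff above.
    if access_control is None:
        return type == "read" if strict else True
    # Group- and user-level checks would follow here for non-public resources.
    return False


assert public_resource_access("read", None) is True
assert public_resource_access("write", None) is False               # strict: public is read-only
assert public_resource_access("write", None, strict=False) is True  # relaxed: public is open
```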
diff --git a/backend/open_webui/utils/channels.py b/backend/open_webui/utils/channels.py
new file mode 100644
index 0000000000..312b5ea24c
--- /dev/null
+++ b/backend/open_webui/utils/channels.py
@@ -0,0 +1,31 @@
+import re
+
+
+def extract_mentions(message: str, triggerChar: str = "@"):
+ # Escape triggerChar in case it's a regex special character
+ triggerChar = re.escape(triggerChar)
+ pattern = rf"<{triggerChar}([A-Z]):([^|>]+)"
+
+ matches = re.findall(pattern, message)
+ return [{"id_type": id_type, "id": id_value} for id_type, id_value in matches]
+
+
+def replace_mentions(message: str, triggerChar: str = "@", use_label: bool = True):
+ """
+ Replace mentions in the message with either their label (after the pipe `|`)
+ or their id if no label exists.
+
+ Example:
+ "<@M:gpt-4.1|GPT-4>" -> "GPT-4" (if use_label=True)
+ "<@M:gpt-4.1|GPT-4>" -> "gpt-4.1" (if use_label=False)
+ """
+ # Escape triggerChar
+ triggerChar = re.escape(triggerChar)
+
+ def replacer(match):
+ id_type, id_value, label = match.groups()
+ return label if use_label and label else id_value
+
+ # Regex captures: idType, id, optional label
+ pattern = rf"<{triggerChar}([A-Z]):([^|>]+)(?:\|([^>]+))?>"
+ return re.sub(pattern, replacer, message)
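A quick usage example for the two helpers above. The `<@TYPE:id|Label>` syntax comes from the docstring; the `U` mention type and the ids are made up for illustration.

```python
from open_webui.utils.channels import extract_mentions, replace_mentions

message = "Hey <@U:1234|Alice>, can <@M:gpt-4.1|GPT-4> summarize this thread?"

print(extract_mentions(message))
# [{'id_type': 'U', 'id': '1234'}, {'id_type': 'M', 'id': 'gpt-4.1'}]

print(replace_mentions(message))
# Hey Alice, can GPT-4 summarize this thread?

print(replace_mentions(message, use_label=False))
# Hey 1234, can gpt-4.1 summarize this thread?
```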
diff --git a/backend/open_webui/utils/files.py b/backend/open_webui/utils/files.py
new file mode 100644
index 0000000000..b410cbab50
--- /dev/null
+++ b/backend/open_webui/utils/files.py
@@ -0,0 +1,97 @@
+from open_webui.routers.images import (
+ load_b64_image_data,
+ upload_image,
+)
+
+from fastapi import (
+ APIRouter,
+ Depends,
+ HTTPException,
+ Request,
+ UploadFile,
+)
+
+from open_webui.routers.files import upload_file_handler
+
+import mimetypes
+import base64
+import io
+
+
+def get_image_url_from_base64(request, base64_image_string, metadata, user):
+ if "data:image/png;base64" in base64_image_string:
+ image_url = ""
+ # Extract base64 image data from the line
+ image_data, content_type = load_b64_image_data(base64_image_string)
+ if image_data is not None:
+ image_url = upload_image(
+ request,
+ image_data,
+ content_type,
+ metadata,
+ user,
+ )
+ return image_url
+ return None
+
+
+def load_b64_audio_data(b64_str):
+ try:
+ if "," in b64_str:
+ header, b64_data = b64_str.split(",", 1)
+ else:
+ b64_data = b64_str
+ header = "data:audio/wav;base64"
+ audio_data = base64.b64decode(b64_data)
+ content_type = (
+ header.split(";")[0].split(":")[1] if ";" in header else "audio/wav"
+ )
+ return audio_data, content_type
+ except Exception as e:
+ print(f"Error decoding base64 audio data: {e}")
+ return None, None
+
+
+def upload_audio(request, audio_data, content_type, metadata, user):
+ audio_format = mimetypes.guess_extension(content_type)
+ file = UploadFile(
+ file=io.BytesIO(audio_data),
+ filename=f"generated-{audio_format}", # will be converted to a unique ID on upload_file
+ headers={
+ "content-type": content_type,
+ },
+ )
+ file_item = upload_file_handler(
+ request,
+ file=file,
+ metadata=metadata,
+ process=False,
+ user=user,
+ )
+ url = request.app.url_path_for("get_file_content_by_id", id=file_item.id)
+ return url
+
+
+def get_audio_url_from_base64(request, base64_audio_string, metadata, user):
+ if "data:audio/wav;base64" in base64_audio_string:
+ audio_url = ""
+ # Extract base64 audio data from the line
+ audio_data, content_type = load_b64_audio_data(base64_audio_string)
+ if audio_data is not None:
+ audio_url = upload_audio(
+ request,
+ audio_data,
+ content_type,
+ metadata,
+ user,
+ )
+ return audio_url
+ return None
+
+
+def get_file_url_from_base64(request, base64_file_string, metadata, user):
+ if "data:image/png;base64" in base64_file_string:
+ return get_image_url_from_base64(request, base64_file_string, metadata, user)
+ elif "data:audio/wav;base64" in base64_file_string:
+ return get_audio_url_from_base64(request, base64_file_string, metadata, user)
+ return None
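These helpers route on the data-URL prefix to decide whether a base64 payload is an image or an audio clip. The header parsing itself is just string splitting; here is the same logic as a standalone check, with no Open WebUI imports:

```python
import base64


def parse_data_url(b64_str: str, default_type: str = "audio/wav"):
    # Mirrors the header/content-type handling in load_b64_audio_data above.
    if "," in b64_str:
        header, b64_data = b64_str.split(",", 1)
    else:
        header, b64_data = f"data:{default_type};base64", b64_str
    content_type = header.split(";")[0].split(":")[1] if ";" in header else default_type
    return base64.b64decode(b64_data), content_type


data, content_type = parse_data_url(
    "data:audio/wav;base64," + base64.b64encode(b"RIFF").decode()
)
assert content_type == "audio/wav"
assert data == b"RIFF"
```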
diff --git a/backend/open_webui/utils/filter.py b/backend/open_webui/utils/filter.py
index 1986e55b64..663b4e3fb7 100644
--- a/backend/open_webui/utils/filter.py
+++ b/backend/open_webui/utils/filter.py
@@ -127,8 +127,10 @@ async def process_filter_functions(
raise e
# Handle file cleanup for inlet
- if skip_files and "files" in form_data.get("metadata", {}):
- del form_data["files"]
- del form_data["metadata"]["files"]
+ if skip_files:
+ if "files" in form_data.get("metadata", {}):
+ del form_data["metadata"]["files"]
+ if "files" in form_data:
+ del form_data["files"]
return form_data, {}
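The filter fix above makes the `skip_files` cleanup tolerant of either location being absent: previously `form_data["files"]` was only removed when `metadata` also carried a `files` entry, which could leave stale top-level files (or raise a `KeyError` in the reverse case). A minimal reproduction of the corrected behaviour with plain dicts:

```python
def strip_files(form_data: dict) -> dict:
    # Same guard structure as the patched inlet cleanup above.
    if "files" in form_data.get("metadata", {}):
        del form_data["metadata"]["files"]
    if "files" in form_data:
        del form_data["files"]
    return form_data


payload = {"files": [{"id": "f1"}], "metadata": {}}  # files only at the top level
print(strip_files(payload))  # {'metadata': {}} -- the old guard would have kept 'files'
```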
diff --git a/backend/open_webui/utils/mcp/client.py b/backend/open_webui/utils/mcp/client.py
new file mode 100644
index 0000000000..01df38886c
--- /dev/null
+++ b/backend/open_webui/utils/mcp/client.py
@@ -0,0 +1,110 @@
+import asyncio
+from typing import Optional
+from contextlib import AsyncExitStack
+
+from mcp import ClientSession
+from mcp.client.auth import OAuthClientProvider, TokenStorage
+from mcp.client.streamable_http import streamablehttp_client
+from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
+
+
+class MCPClient:
+ def __init__(self):
+ self.session: Optional[ClientSession] = None
+ self.exit_stack = AsyncExitStack()
+
+ async def connect(self, url: str, headers: Optional[dict] = None):
+ try:
+ self._streams_context = streamablehttp_client(url, headers=headers)
+
+ transport = await self.exit_stack.enter_async_context(self._streams_context)
+ read_stream, write_stream, _ = transport
+
+ self._session_context = ClientSession(
+ read_stream, write_stream
+ ) # pylint: disable=W0201
+
+ self.session = await self.exit_stack.enter_async_context(
+ self._session_context
+ )
+ await self.session.initialize()
+ except Exception as e:
+ await self.disconnect()
+ raise e
+
+ async def list_tool_specs(self) -> Optional[dict]:
+ if not self.session:
+ raise RuntimeError("MCP client is not connected.")
+
+ result = await self.session.list_tools()
+ tools = result.tools
+
+ tool_specs = []
+ for tool in tools:
+ name = tool.name
+ description = tool.description
+
+ inputSchema = tool.inputSchema
+
+ # TODO: handle outputSchema if needed
+ outputSchema = getattr(tool, "outputSchema", None)
+
+ tool_specs.append(
+ {"name": name, "description": description, "parameters": inputSchema}
+ )
+
+ return tool_specs
+
+ async def call_tool(
+ self, function_name: str, function_args: dict
+ ) -> Optional[dict]:
+ if not self.session:
+ raise RuntimeError("MCP client is not connected.")
+
+ result = await self.session.call_tool(function_name, function_args)
+ if not result:
+ raise Exception("No result returned from MCP tool call.")
+
+ result_dict = result.model_dump(mode="json")
+ result_content = result_dict.get("content", {})
+
+ if result.isError:
+ raise Exception(result_content)
+ else:
+ return result_content
+
+ async def list_resources(self, cursor: Optional[str] = None) -> Optional[dict]:
+ if not self.session:
+ raise RuntimeError("MCP client is not connected.")
+
+ result = await self.session.list_resources(cursor=cursor)
+ if not result:
+ raise Exception("No result returned from MCP list_resources call.")
+
+ result_dict = result.model_dump()
+ resources = result_dict.get("resources", [])
+
+ return resources
+
+ async def read_resource(self, uri: str) -> Optional[dict]:
+ if not self.session:
+ raise RuntimeError("MCP client is not connected.")
+
+ result = await self.session.read_resource(uri)
+ if not result:
+ raise Exception("No result returned from MCP read_resource call.")
+ result_dict = result.model_dump()
+
+ return result_dict
+
+ async def disconnect(self):
+ # Clean up and close the session
+ await self.exit_stack.aclose()
+
+ async def __aenter__(self):
+ await self.exit_stack.__aenter__()
+ return self
+
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ await self.exit_stack.__aexit__(exc_type, exc_value, traceback)
+ await self.disconnect()
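A usage sketch for the client above, run as an async context manager against a hypothetical streamable-HTTP MCP endpoint; the URL and bearer token are placeholders.

```python
import asyncio

from open_webui.utils.mcp.client import MCPClient


async def main():
    async with MCPClient() as client:
        await client.connect(
            url="https://mcp.example.com/mcp",            # placeholder endpoint
            headers={"Authorization": "Bearer <token>"},  # placeholder credentials
        )

        specs = await client.list_tool_specs()
        print([spec["name"] for spec in specs])

        # Call the first advertised tool; may fail if it requires arguments.
        if specs:
            print(await client.call_tool(specs[0]["name"], {}))


asyncio.run(main())
```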
diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py
index ae2c96c6da..ff8c215607 100644
--- a/backend/open_webui/utils/middleware.py
+++ b/backend/open_webui/utils/middleware.py
@@ -20,9 +20,11 @@ from concurrent.futures import ThreadPoolExecutor
from fastapi import Request, HTTPException
+from fastapi.responses import HTMLResponse
from starlette.responses import Response, StreamingResponse, JSONResponse
+from open_webui.models.oauth_sessions import OAuthSessions
from open_webui.models.chats import Chats
from open_webui.models.folders import Folders
from open_webui.models.users import Users
@@ -52,6 +54,11 @@ from open_webui.routers.pipelines import (
from open_webui.routers.memories import query_memory, QueryMemoryForm
from open_webui.utils.webhook import post_webhook
+from open_webui.utils.files import (
+ get_audio_url_from_base64,
+ get_file_url_from_base64,
+ get_image_url_from_base64,
+)
from open_webui.models.users import UserModel
@@ -86,6 +93,7 @@ from open_webui.utils.filter import (
)
from open_webui.utils.code_interpreter import execute_code_jupyter
from open_webui.utils.payload import apply_system_prompt_to_body
+from open_webui.utils.mcp.client import MCPClient
from open_webui.config import (
@@ -144,12 +152,14 @@ async def chat_completion_tools_handler(
def get_tools_function_calling_payload(messages, task_model_id, content):
user_message = get_last_user_message(messages)
- history = "\n".join(
+
+ recent_messages = messages[-4:] if len(messages) > 4 else messages
+ chat_history = "\n".join(
f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
- for message in messages[::-1][:4]
+ for message in recent_messages
)
- prompt = f"History:\n{history}\nQuery: {user_message}"
+ prompt = f"History:\n{chat_history}\nQuery: {user_message}"
return {
"model": task_model_id,
@@ -631,48 +641,53 @@ async def chat_completion_files_handler(
sources = []
if files := body.get("metadata", {}).get("files", None):
+ # Check if all files are in full context mode
+ all_full_context = all(item.get("context") == "full" for item in files)
+
queries = []
- try:
- queries_response = await generate_queries(
- request,
- {
- "model": body["model"],
- "messages": body["messages"],
- "type": "retrieval",
- },
- user,
- )
- queries_response = queries_response["choices"][0]["message"]["content"]
-
+ if not all_full_context:
try:
- bracket_start = queries_response.find("{")
- bracket_end = queries_response.rfind("}") + 1
+ queries_response = await generate_queries(
+ request,
+ {
+ "model": body["model"],
+ "messages": body["messages"],
+ "type": "retrieval",
+ },
+ user,
+ )
+ queries_response = queries_response["choices"][0]["message"]["content"]
- if bracket_start == -1 or bracket_end == -1:
- raise Exception("No JSON object found in the response")
+ try:
+ bracket_start = queries_response.find("{")
+ bracket_end = queries_response.rfind("}") + 1
- queries_response = queries_response[bracket_start:bracket_end]
- queries_response = json.loads(queries_response)
- except Exception as e:
- queries_response = {"queries": [queries_response]}
+ if bracket_start == -1 or bracket_end == -1:
+ raise Exception("No JSON object found in the response")
- queries = queries_response.get("queries", [])
- except:
- pass
+ queries_response = queries_response[bracket_start:bracket_end]
+ queries_response = json.loads(queries_response)
+ except Exception as e:
+ queries_response = {"queries": [queries_response]}
+
+ queries = queries_response.get("queries", [])
+ except:
+ pass
if len(queries) == 0:
queries = [get_last_user_message(body["messages"])]
- await __event_emitter__(
- {
- "type": "status",
- "data": {
- "action": "queries_generated",
- "queries": queries,
- "done": False,
- },
- }
- )
+ if not all_full_context:
+ await __event_emitter__(
+ {
+ "type": "status",
+ "data": {
+ "action": "queries_generated",
+ "queries": queries,
+ "done": False,
+ },
+ }
+ )
try:
# Offload get_sources_from_items to a separate thread
@@ -701,7 +716,8 @@ async def chat_completion_files_handler(
r=request.app.state.config.RELEVANCE_THRESHOLD,
hybrid_bm25_weight=request.app.state.config.HYBRID_BM25_WEIGHT,
hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH,
- full_context=request.app.state.config.RAG_FULL_CONTEXT,
+ full_context=all_full_context
+ or request.app.state.config.RAG_FULL_CONTEXT,
user=user,
),
)
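The retrieval change above short-circuits query generation whenever every attached file was added in full-context mode; the flag is just an `all()` over the per-file metadata, as this small check shows (hypothetical payload):

```python
files = [
    {"id": "doc-1", "context": "full"},
    {"id": "doc-2", "context": "full"},
]

all_full_context = all(item.get("context") == "full" for item in files)
print(all_full_context)  # True -> skip query generation, pass the documents through whole

files.append({"id": "doc-3"})  # no explicit context -> retrieval is needed again
print(all(item.get("context") == "full" for item in files))  # False
```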
@@ -817,10 +833,11 @@ async def process_chat_payload(request, form_data, user, metadata, model):
oauth_token = None
try:
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
- user.id,
- request.cookies.get("oauth_session_id", None),
- )
+ if request.cookies.get("oauth_session_id", None):
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
except Exception as e:
log.error(f"Error getting OAuth token: {e}")
@@ -986,14 +1003,107 @@ async def process_chat_payload(request, form_data, user, metadata, model):
# Server side tools
tool_ids = metadata.get("tool_ids", None)
# Client side tools
- tool_servers = metadata.get("tool_servers", None)
+ direct_tool_servers = metadata.get("tool_servers", None)
log.debug(f"{tool_ids=}")
- log.debug(f"{tool_servers=}")
+ log.debug(f"{direct_tool_servers=}")
tools_dict = {}
+ mcp_clients = []
+ mcp_tools_dict = {}
+
if tool_ids:
+ for tool_id in tool_ids:
+ if tool_id.startswith("server:mcp:"):
+ try:
+ server_id = tool_id[len("server:mcp:") :]
+
+ mcp_server_connection = None
+ for (
+ server_connection
+ ) in request.app.state.config.TOOL_SERVER_CONNECTIONS:
+ if (
+ server_connection.get("type", "") == "mcp"
+ and server_connection.get("info", {}).get("id") == server_id
+ ):
+ mcp_server_connection = server_connection
+ break
+
+ if not mcp_server_connection:
+ log.error(f"MCP server with id {server_id} not found")
+ continue
+
+ auth_type = mcp_server_connection.get("auth_type", "")
+
+ headers = {}
+ if auth_type == "bearer":
+ headers["Authorization"] = (
+ f"Bearer {mcp_server_connection.get('key', '')}"
+ )
+ elif auth_type == "none":
+ # No authentication
+ pass
+ elif auth_type == "session":
+ headers["Authorization"] = (
+ f"Bearer {request.state.token.credentials}"
+ )
+ elif auth_type == "system_oauth":
+ oauth_token = extra_params.get("__oauth_token__", None)
+ if oauth_token:
+ headers["Authorization"] = (
+ f"Bearer {oauth_token.get('access_token', '')}"
+ )
+ elif auth_type == "oauth_2.1":
+ try:
+ splits = server_id.split(":")
+ server_id = splits[-1] if len(splits) > 1 else server_id
+
+ oauth_token = await request.app.state.oauth_client_manager.get_oauth_token(
+ user.id, f"mcp:{server_id}"
+ )
+
+ if oauth_token:
+ headers["Authorization"] = (
+ f"Bearer {oauth_token.get('access_token', '')}"
+ )
+ except Exception as e:
+ log.error(f"Error getting OAuth token: {e}")
+ oauth_token = None
+
+ mcp_client = MCPClient()
+ await mcp_client.connect(
+ url=mcp_server_connection.get("url", ""),
+ headers=headers if headers else None,
+ )
+
+ tool_specs = await mcp_client.list_tool_specs()
+ for tool_spec in tool_specs:
+
+ def make_tool_function(function_name):
+ async def tool_function(**kwargs):
+ return await mcp_client.call_tool(
+ function_name,
+ function_args=kwargs,
+ )
+
+ return tool_function
+
+ tool_function = make_tool_function(tool_spec["name"])
+
+ mcp_tools_dict[tool_spec["name"]] = {
+ "spec": tool_spec,
+ "callable": tool_function,
+ "type": "mcp",
+ "client": mcp_client,
+ "direct": False,
+ }
+
+ mcp_clients.append(mcp_client)
+ except Exception as e:
+ log.debug(e)
+ continue
+
tools_dict = await get_tools(
request,
tool_ids,
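One detail worth noting in the MCP wiring above: the per-tool callables are produced by a `make_tool_function` factory instead of closing over the loop variable directly, so each callable binds its own tool name. A standalone illustration of the late-binding pitfall the factory avoids:

```python
import asyncio


def make_tool_function(name):
    async def tool_function(**kwargs):
        return f"called {name} with {kwargs}"
    return tool_function


# Factory version: each entry remembers its own name.
tools = {name: make_tool_function(name) for name in ["alpha", "beta"]}

# Naive version: every closure sees the final value of the loop variable.
naive = {}
for name in ["alpha", "beta"]:
    async def tool_function(**kwargs):
        return f"called {name} with {kwargs}"
    naive[name] = tool_function

print(asyncio.run(tools["alpha"]()))  # called alpha with {}
print(asyncio.run(naive["alpha"]()))  # called beta with {} -- late-binding bug
```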
@@ -1005,9 +1115,11 @@ async def process_chat_payload(request, form_data, user, metadata, model):
"__files__": metadata.get("files", []),
},
)
+ if mcp_tools_dict:
+ tools_dict = {**tools_dict, **mcp_tools_dict}
- if tool_servers:
- for tool_server in tool_servers:
+ if direct_tool_servers:
+ for tool_server in direct_tool_servers:
tool_specs = tool_server.pop("specs", [])
for tool in tool_specs:
@@ -1017,6 +1129,9 @@ async def process_chat_payload(request, form_data, user, metadata, model):
"server": tool_server,
}
+ if mcp_clients:
+ metadata["mcp_clients"] = mcp_clients
+
if tools_dict:
if metadata.get("params", {}).get("function_calling") == "native":
# If the function calling is native, then call the tools function calling handler
@@ -1025,6 +1140,7 @@ async def process_chat_payload(request, form_data, user, metadata, model):
{"type": "function", "function": tool.get("spec", {})}
for tool in tools_dict.values()
]
+
else:
# If the function calling is not native, then call the tools function calling handler
try:
@@ -1078,26 +1194,15 @@ async def process_chat_payload(request, form_data, user, metadata, model):
raise Exception("No user message found")
if context_string != "":
- # Workaround for Ollama 2.0+ system prompt issue
- # TODO: replace with add_or_update_system_message
- if model.get("owned_by") == "ollama":
- form_data["messages"] = prepend_to_first_user_message_content(
- rag_template(
- request.app.state.config.RAG_TEMPLATE,
- context_string,
- prompt,
- ),
- form_data["messages"],
- )
- else:
- form_data["messages"] = add_or_update_system_message(
- rag_template(
- request.app.state.config.RAG_TEMPLATE,
- context_string,
- prompt,
- ),
- form_data["messages"],
- )
+ form_data["messages"] = add_or_update_user_message(
+ rag_template(
+ request.app.state.config.RAG_TEMPLATE,
+ context_string,
+ prompt,
+ ),
+ form_data["messages"],
+ append=False,
+ )
# If there are citations, add them to the data_items
sources = [
@@ -1130,11 +1235,11 @@ async def process_chat_response(
request, response, form_data, user, metadata, model, events, tasks
):
async def background_tasks_handler():
- message_map = Chats.get_messages_by_chat_id(metadata["chat_id"])
- message = message_map.get(metadata["message_id"]) if message_map else None
+ messages_map = Chats.get_messages_map_by_chat_id(metadata["chat_id"])
+ message = messages_map.get(metadata["message_id"]) if messages_map else None
if message:
- message_list = get_message_list(message_map, metadata["message_id"])
+ message_list = get_message_list(messages_map, metadata["message_id"])
# Remove details tags and files from the messages.
# as get_message_list creates a new list, it does not affect
@@ -1496,10 +1601,11 @@ async def process_chat_response(
oauth_token = None
try:
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
- user.id,
- request.cookies.get("oauth_session_id", None),
- )
+ if request.cookies.get("oauth_session_id", None):
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
except Exception as e:
log.error(f"Error getting OAuth token: {e}")
@@ -1579,7 +1685,8 @@ async def process_chat_response(
break
if tool_result is not None:
- tool_calls_display_content = f'{tool_calls_display_content}<details type="tool_calls" ...>\n<summary>Tool Executed</summary>\n</details>\n'