diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e67c9fbd5..7e04470d0e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,76 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [0.6.30] - 2025-09-17
+
+### Added
+
+- 🔑 Microsoft Entra ID authentication type support was added for Azure OpenAI connections, enabling enhanced security and streamlined authentication workflows.
+
+### Fixed
+
+- ☁️ OneDrive integration was fixed following a recent regression, restoring reliable account connectivity and file access.
+
+## [0.6.29] - 2025-09-17
+
+### Added
+
+- 🎨 The chat input menu has been completely overhauled with a revolutionary new design, consolidating attachments under a unified '+' button, organizing integrations into a streamlined options menu, and introducing powerful, interactive selectors for attaching chats, notes, and knowledge base items. [Commit](https://github.com/open-webui/open-webui/commit/a68342d5a887e36695e21f8c2aec593b159654ff), [Commit](https://github.com/open-webui/open-webui/commit/96b8aaf83ff341fef432649366bc5155bac6cf20), [Commit](https://github.com/open-webui/open-webui/commit/4977e6d50f7b931372c96dd5979ca635d58aeb78), [Commit](https://github.com/open-webui/open-webui/commit/d973db829f7ec98b8f8fe7d3b2822d588e79f94e), [Commit](https://github.com/open-webui/open-webui/commit/d4c628de09654df76653ad9bce9cb3263e2f27c8), [Commit](https://github.com/open-webui/open-webui/commit/cd740f436db4ea308dbede14ef7ff56e8126f51b), [Commit](https://github.com/open-webui/open-webui/commit/5c2db102d06b5c18beb248d795682ff422e9b6d1), [Commit](https://github.com/open-webui/open-webui/commit/031cf38655a1a2973194d2eaa0fbbd17aca8ee92), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/3ed0a6d11fea1a054e0bc8aa8dfbe417c7c53e51), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/eadec9e86e01bc8f9fb90dfe7a7ae4fc3bfa6420), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c03ca7270e64e3a002d321237160c0ddaf2bb129), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b53ddfbd19aa94e9cbf7210acb31c3cfafafa5fe), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c923461882fcde30ae297a95e91176c95b9b72e1)
+- 🤖 AI models can now be mentioned in channels to automatically generate responses, enabling multi-model conversations where mentioned models participate directly in threaded discussions with full context awareness. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/4fe97d8794ee18e087790caab9e5d82886006145)
+- 💬 The Channels feature now utilizes the modern rich text editor, including support for '/', '@', and '#' command suggestions. [Commit](https://github.com/open-webui/open-webui/commit/06c1426e14ac0dfaf723485dbbc9723a4d89aba9), [Commit](https://github.com/open-webui/open-webui/commit/02f7c3258b62970ce79716f75d15467a96565054)
+- 📎 Channel message input now supports direct paste functionality for images and files from the clipboard, streamlining content sharing workflows. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/6549fc839f86c40c26c2ef4dedcaf763a9304418)
+- ⚙️ Models can now be configured with default features (Web Search, Image Generation) and filters that automatically activate when a user selects the model. [Commit](https://github.com/open-webui/open-webui/commit/9a555478273355a5177bfc7f7211c64778e4c8de), [Commit](https://github.com/open-webui/open-webui/commit/384a53b339820068e92f7eaea0d9f3e0536c19c2), [Commit](https://github.com/open-webui/open-webui/commit/d7f43bfc1a30c065def8c50d77c2579c1a3c5c67), [Commit](https://github.com/open-webui/open-webui/commit/6a67a2217cc5946ad771e479e3a37ac213210748)
+- 💬 The ability to reference other chats as context within a conversation was added via the attachment menu. [Commit](https://github.com/open-webui/open-webui/commit/e097bbdf11ae4975c622e086df00d054291cdeb3), [Commit](https://github.com/open-webui/open-webui/commit/f3cd2ffb18e7dedbe88430f9ae7caa6b3cfd79d0), [Commit](https://github.com/open-webui/open-webui/commit/74263c872c5d574a9bb0944d7984f748dc772dba), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/aa8ab349ed2fcb46d1cf994b9c0de2ec2ea35d0d), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/025eef754f0d46789981defd473d001e3b1d0ca2)
+- 🎨 The command suggestion UI for prompts ('/'), models ('@'), and knowledge ('#') was completely overhauled with a more responsive and keyboard-navigable interface. [Commit](https://github.com/open-webui/open-webui/commit/6b69c4da0fb9329ccf7024483960e070cf52ccab), [Commit](https://github.com/open-webui/open-webui/commit/06a6855f844456eceaa4d410c93379460e208202), [Commit](https://github.com/open-webui/open-webui/commit/c55f5578280b936cf581a743df3703e3db1afd54), [Commit](https://github.com/open-webui/open-webui/commit/f68d1ba394d4423d369f827894cde99d760b2402)
+- 👥 User and channel suggestions were added to the mention system, enabling '@' mentions for users and models, and '#' mentions for channels with searchable user lookup and clickable navigation. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/bbd1d2b58c89b35daea234f1fc9208f2af840899), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/aef1e06f0bb72065a25579c982dd49157e320268), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/779db74d7e9b7b00d099b7d65cfbc8a831e74690)
+- 📁 Folder functionality was enhanced with custom background image support, improved drag-and-drop capabilities for moving folders to root level, and better menu interactions. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2a234829f5dfdfde27fdfd30591caa908340efb4), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2b1ee8b0dc5f7c0caaafdd218f20705059fa72e2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b1e5bc8e490745f701909c19b6a444b67c04660e), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/3e584132686372dfeef187596a7c557aa5f48308)
+- ☁️ OneDrive integration configuration now supports selecting between personal and work/school account types via ENABLE_ONEDRIVE_PERSONAL and ENABLE_ONEDRIVE_BUSINESS environment variables. [#17354](https://github.com/open-webui/open-webui/pull/17354), [Commit](https://github.com/open-webui/open-webui/commit/e1e3009a30f9808ce06582d81a60e391f5ca09ec), [Docs:#697](https://github.com/open-webui/docs/pull/697)
+- ⚡ Mermaid.js is now dynamically loaded on demand, significantly reducing first-screen loading time and improving initial page performance. [#17476](https://github.com/open-webui/open-webui/issues/17476), [#17477](https://github.com/open-webui/open-webui/pull/17477)
+- ⚡ Azure MSAL browser library is now dynamically loaded on demand, reducing initial bundle size by 730KB and improving first-screen loading speed. [#17479](https://github.com/open-webui/open-webui/pull/17479)
+- ⚡ CodeEditor component is now dynamically loaded on demand, reducing initial bundle size by 1MB and improving first-screen loading speed. [#17498](https://github.com/open-webui/open-webui/pull/17498)
+- ⚡ Hugging Face Transformers library is now dynamically loaded on demand, reducing initial bundle size by 1.9MB and improving first-screen loading speed. [#17499](https://github.com/open-webui/open-webui/pull/17499)
+- ⚡ jsPDF and html2canvas-pro libraries are now dynamically loaded on demand, reducing initial bundle size by 980KB and improving first-screen loading speed. [#17502](https://github.com/open-webui/open-webui/pull/17502)
+- ⚡ Leaflet mapping library is now dynamically loaded on demand, reducing initial bundle size by 454KB and improving first-screen loading speed. [#17503](https://github.com/open-webui/open-webui/pull/17503)
+- 📊 OpenTelemetry metrics collection was enhanced to properly handle HTTP 500 errors and ensure metrics are recorded even during exceptions. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/b14617a653c6bdcfd3102c12f971924fd1faf572)
+- 🔒 OAuth token retrieval logic was refactored, improving the reliability and consistency of authentication handling across the backend. [Commit](https://github.com/open-webui/open-webui/commit/6c0a5fa91cdbf6ffb74667ee61ca96bebfdfbc50)
+- 💻 Code block output processing was improved to handle Python execution results more reliably, along with refined visual styling and button layouts. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/0e5320c39e308ff97f2ca9e289618af12479eb6e)
+- ⚡ Message input processing was optimized to skip unnecessary text variable handling when input is empty, improving performance. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/e1386fe80b77126a12dabc4ad058abe9b024b275)
+- 📄 Individual chat PDF export was added to the sidebar chat menu, allowing users to export single conversations as PDF documents with both stylized and plain text options. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/d041d58bb619689cd04a391b4f8191b23941ca62)
+- 🛠️ Function validation was enhanced with improved valve validation and better error handling during function loading and synchronization. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/e66e0526ed6a116323285f79f44237538b6c75e6), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/8edfd29102e0a61777b23d3575eaa30be37b59a5)
+- 🔔 Notification toast interaction was enhanced with drag detection to prevent accidental clicks and added keyboard support for accessibility. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/621e7679c427b6f0efa85f95235319238bf171ad)
+- 🗓️ Date and time formatting now dynamically adapts to the selected language, ensuring consistent localization across the UI. [#17409](https://github.com/open-webui/open-webui/pull/17409), [Commit](https://github.com/open-webui/open-webui/commit/2227f24bd6d861b1fad8d2cabacf7d62ce137d0c)
+- 🔒 Feishu SSO integration was added, allowing users to authenticate via Feishu. [#17284](https://github.com/open-webui/open-webui/pull/17284), [Docs:#685](https://github.com/open-webui/docs/pull/685)
+- 🔠 Toggle filters in the chat input options menu are now sorted alphabetically for easier navigation. [Commit](https://github.com/open-webui/open-webui/commit/ca853ca4656180487afcd84230d214f91db52533)
+- 🎨 Long chat titles in the sidebar are now truncated to prevent text overflow and maintain a clean layout. [#17356](https://github.com/open-webui/open-webui/pull/17356)
+- 🎨 Temporary chat interface design was refined with improved layout and visual consistency. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/67549dcadd670285d491bd41daf3d081a70fd094), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/2ca34217e68f3b439899c75881dfb050f49c9eb2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/fb02ec52a5df3f58b53db4ab3a995c15f83503cd)
+- 🎨 Download icon consistency was improved across the entire interface by standardizing the icon component used in menus, functions, tools, and export features. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/596be451ece7e11b5cd25465d49670c27a1cb33f)
+- 🎨 Settings interface was enhanced with improved iconography and reorganized the 'Chats' section into 'Data Controls' for better clarity. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/8bf0b40fdd978b5af6548a6e1fb3aabd90bcd5cd)
+- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security.
+- 🌐 Translations for Finnish, German, Kabyle, Portuguese (Brazil), Simplified Chinese, Spanish (Spain), and Traditional Chinese (Taiwan) were enhanced and expanded.
+
+### Fixed
+
+- 📚 Knowledge base permission logic was corrected to ensure private collection owners can access their own content when embedding bypass is enabled. [#17432](https://github.com/open-webui/open-webui/issues/17432), [Commit](https://github.com/open-webui/open-webui/commit/a51f0c30ec1472d71487eab3e15d0351a2716b12)
+- ⚙️ Connection URL editing in Admin Settings now properly saves changes instead of reverting to original values, fixing issues with both Ollama and OpenAI-compatible endpoints. [#17435](https://github.com/open-webui/open-webui/issues/17435), [Commit](https://github.com/open-webui/open-webui/commit/e4c864de7eb0d577843a80688677ce3659d1f81f)
+- 📊 Usage information collection from Google models was corrected to handle providers that send usage data alongside content chunks instead of separately. [#17421](https://github.com/open-webui/open-webui/pull/17421), [Commit](https://github.com/open-webui/open-webui/commit/c2f98a4cd29ed738f395fef09c42ab8e73cd46a0)
+- ⚙️ Settings modal scrolling issue was resolved by moving image compression controls to a dedicated modal, preventing the main settings from becoming scrollable out of view. [#17474](https://github.com/open-webui/open-webui/issues/17474), [Commit](https://github.com/open-webui/open-webui/commit/fed5615c19b0045a55b0be426b468a57bfda4b66)
+- 📁 Folder click behavior was improved to prevent accidental actions by implementing proper double-click detection and timing delays for folder expansion and selection. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/19e3214997170eea6ee92452e8c778e04a28e396)
+- 🔐 Access control component reliability was improved with better null checking and error handling for group permissions and private access scenarios. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/c8780a7f934c5e49a21b438f2f30232f83cf75d2), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/32015c392dbc6b7367a6a91d9e173e675ea3402c)
+- 🔗 The citation modal now correctly displays and links to external web page sources in addition to internal documents. [Commit](https://github.com/open-webui/open-webui/commit/9208a84185a7e59524f00a7576667d493c3ac7d4)
+- 🔗 Web and YouTube attachment handling was fixed, ensuring their content is now reliably processed and included in the chat context for retrieval. [Commit](https://github.com/open-webui/open-webui/commit/210197fd438b52080cda5d6ce3d47b92cdc264c8)
+- 📂 Large file upload failures are resolved by correcting the processing logic for scenarios where document embedding is bypassed. [Commit](https://github.com/open-webui/open-webui/commit/051b6daa8299fd332503bd584563556e2ae6adab)
+- 🌐 Rich text input placeholder text now correctly updates when the interface language is switched, ensuring proper localization. [#17473](https://github.com/open-webui/open-webui/pull/17473), [Commit](https://github.com/open-webui/open-webui/commit/77358031f5077e6efe5cc08d8d4e5831c7cd1cd9)
+- 📊 Llama.cpp server timing metrics are now correctly parsed and displayed by fixing a typo in the response handling. [#17350](https://github.com/open-webui/open-webui/issues/17350), [Commit](https://github.com/open-webui/open-webui/commit/cf72f5503f39834b9da44ebbb426a3674dad0caa)
+- 🛠️ Filter functions with file_handler configuration now properly handle messages without file attachments, preventing runtime errors. [#17423](https://github.com/open-webui/open-webui/pull/17423)
+- 🔔 Channel notification delivery was fixed to properly handle background task execution and user access checking. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/1077b2ac8b96e49c2ad2620e76eb65bbb2a3a1f3)
+
+### Changed
+
+- 📝 Prompt template variables are now optional by default instead of being forced as required, allowing flexible workflows with optional metadata fields. [#17447](https://github.com/open-webui/open-webui/issues/17447), [Commit](https://github.com/open-webui/open-webui/commit/d5824b1b495fcf86e57171769bcec2a0f698b070), [Docs:#696](https://github.com/open-webui/docs/pull/696)
+- 🛠️ Direct external tool servers now require explicit user selection from the input interface instead of being automatically included in conversations, providing better control over tool usage. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/0f04227c34ca32746c43a9323e2df32299fcb6af), [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/99bba12de279dd55c55ded35b2e4f819af1c9ab5)
+- 📺 Widescreen mode option was removed from Channels interface, with all channel layouts now using full-width display. [Commit](https://github.com/open-webui/open-webui/pull/17420/commits/d46b7b8f1b99a8054b55031fe935c8a16d5ec956)
+- 🎛️ The plain textarea input option was deprecated, and the custom text editor is now the standard for all chat inputs. [Commit](https://github.com/open-webui/open-webui/commit/153afd832ccd12a1e5fd99b085008d080872c161)
+
## [0.6.28] - 2025-09-10
### Added
diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py
index 207d4405ee..ca090efa22 100644
--- a/backend/open_webui/config.py
+++ b/backend/open_webui/config.py
@@ -730,6 +730,7 @@ def load_oauth_providers():
}
if FEISHU_CLIENT_ID.value and FEISHU_CLIENT_SECRET.value:
+
def feishu_oauth_register(client: OAuth):
client.register(
name="feishu",
@@ -2167,6 +2168,12 @@ ENABLE_ONEDRIVE_INTEGRATION = PersistentConfig(
"onedrive.enable",
os.getenv("ENABLE_ONEDRIVE_INTEGRATION", "False").lower() == "true",
)
+ENABLE_ONEDRIVE_PERSONAL = (
+ os.environ.get("ENABLE_ONEDRIVE_PERSONAL", "True").lower() == "true"
+)
+ENABLE_ONEDRIVE_BUSINESS = (
+ os.environ.get("ENABLE_ONEDRIVE_BUSINESS", "True").lower() == "true"
+)
ONEDRIVE_CLIENT_ID = PersistentConfig(
"ONEDRIVE_CLIENT_ID",
diff --git a/backend/open_webui/functions.py b/backend/open_webui/functions.py
index 7224d28113..d102263cb3 100644
--- a/backend/open_webui/functions.py
+++ b/backend/open_webui/functions.py
@@ -19,6 +19,7 @@ from fastapi import (
from starlette.responses import Response, StreamingResponse
+from open_webui.constants import ERROR_MESSAGES
from open_webui.socket.main import (
get_event_call,
get_event_emitter,
@@ -60,8 +61,20 @@ def get_function_module_by_id(request: Request, pipe_id: str):
function_module, _, _ = get_function_module_from_cache(request, pipe_id)
if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
+ Valves = function_module.Valves
valves = Functions.get_function_valves_by_id(pipe_id)
- function_module.valves = function_module.Valves(**(valves if valves else {}))
+
+ if valves:
+ try:
+ function_module.valves = Valves(
+ **{k: v for k, v in valves.items() if v is not None}
+ )
+ except Exception as e:
+ log.exception(f"Error loading valves for function {pipe_id}: {e}")
+ raise e
+ else:
+ function_module.valves = Valves()
+
return function_module
@@ -70,65 +83,69 @@ async def get_function_models(request):
pipe_models = []
for pipe in pipes:
- function_module = get_function_module_by_id(request, pipe.id)
+ try:
+ function_module = get_function_module_by_id(request, pipe.id)
- # Check if function is a manifold
- if hasattr(function_module, "pipes"):
- sub_pipes = []
-
- # Handle pipes being a list, sync function, or async function
- try:
- if callable(function_module.pipes):
- if asyncio.iscoroutinefunction(function_module.pipes):
- sub_pipes = await function_module.pipes()
- else:
- sub_pipes = function_module.pipes()
- else:
- sub_pipes = function_module.pipes
- except Exception as e:
- log.exception(e)
+ # Check if function is a manifold
+ if hasattr(function_module, "pipes"):
sub_pipes = []
- log.debug(
- f"get_function_models: function '{pipe.id}' is a manifold of {sub_pipes}"
- )
+ # Handle pipes being a list, sync function, or async function
+ try:
+ if callable(function_module.pipes):
+ if asyncio.iscoroutinefunction(function_module.pipes):
+ sub_pipes = await function_module.pipes()
+ else:
+ sub_pipes = function_module.pipes()
+ else:
+ sub_pipes = function_module.pipes
+ except Exception as e:
+ log.exception(e)
+ sub_pipes = []
- for p in sub_pipes:
- sub_pipe_id = f'{pipe.id}.{p["id"]}'
- sub_pipe_name = p["name"]
+ log.debug(
+ f"get_function_models: function '{pipe.id}' is a manifold of {sub_pipes}"
+ )
- if hasattr(function_module, "name"):
- sub_pipe_name = f"{function_module.name}{sub_pipe_name}"
+ for p in sub_pipes:
+ sub_pipe_id = f'{pipe.id}.{p["id"]}'
+ sub_pipe_name = p["name"]
- pipe_flag = {"type": pipe.type}
+ if hasattr(function_module, "name"):
+ sub_pipe_name = f"{function_module.name}{sub_pipe_name}"
+
+ pipe_flag = {"type": pipe.type}
+
+ pipe_models.append(
+ {
+ "id": sub_pipe_id,
+ "name": sub_pipe_name,
+ "object": "model",
+ "created": pipe.created_at,
+ "owned_by": "openai",
+ "pipe": pipe_flag,
+ }
+ )
+ else:
+ pipe_flag = {"type": "pipe"}
+
+ log.debug(
+ f"get_function_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}"
+ )
pipe_models.append(
{
- "id": sub_pipe_id,
- "name": sub_pipe_name,
+ "id": pipe.id,
+ "name": pipe.name,
"object": "model",
"created": pipe.created_at,
"owned_by": "openai",
"pipe": pipe_flag,
}
)
- else:
- pipe_flag = {"type": "pipe"}
-
- log.debug(
- f"get_function_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}"
- )
-
- pipe_models.append(
- {
- "id": pipe.id,
- "name": pipe.name,
- "object": "model",
- "created": pipe.created_at,
- "owned_by": "openai",
- "pipe": pipe_flag,
- }
- )
+ except Exception as e:
+ log.exception(e)
+ continue
return pipe_models
@@ -222,7 +239,7 @@ async def generate_function_chat_completion(
oauth_token = None
try:
if request.cookies.get("oauth_session_id", None):
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
user.id,
request.cookies.get("oauth_session_id", None),
)
diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py
index 6b536c78bc..5630a58839 100644
--- a/backend/open_webui/main.py
+++ b/backend/open_webui/main.py
@@ -110,9 +110,6 @@ from open_webui.config import (
OLLAMA_API_CONFIGS,
# OpenAI
ENABLE_OPENAI_API,
- ONEDRIVE_CLIENT_ID,
- ONEDRIVE_SHAREPOINT_URL,
- ONEDRIVE_SHAREPOINT_TENANT_ID,
OPENAI_API_BASE_URLS,
OPENAI_API_KEYS,
OPENAI_API_CONFIGS,
@@ -303,14 +300,16 @@ from open_webui.config import (
GOOGLE_PSE_ENGINE_ID,
GOOGLE_DRIVE_CLIENT_ID,
GOOGLE_DRIVE_API_KEY,
+ ENABLE_ONEDRIVE_INTEGRATION,
ONEDRIVE_CLIENT_ID,
ONEDRIVE_SHAREPOINT_URL,
ONEDRIVE_SHAREPOINT_TENANT_ID,
+ ENABLE_ONEDRIVE_PERSONAL,
+ ENABLE_ONEDRIVE_BUSINESS,
ENABLE_RAG_HYBRID_SEARCH,
ENABLE_RAG_LOCAL_WEB_FETCH,
ENABLE_WEB_LOADER_SSL_VERIFICATION,
ENABLE_GOOGLE_DRIVE_INTEGRATION,
- ENABLE_ONEDRIVE_INTEGRATION,
UPLOAD_DIR,
EXTERNAL_WEB_SEARCH_URL,
EXTERNAL_WEB_SEARCH_API_KEY,
@@ -448,6 +447,7 @@ from open_webui.utils.models import (
get_all_models,
get_all_base_models,
check_model_access,
+ get_filtered_models,
)
from open_webui.utils.chat import (
generate_chat_completion as chat_completion_handler,
@@ -1291,33 +1291,6 @@ if audit_level != AuditLevel.NONE:
async def get_models(
request: Request, refresh: bool = False, user=Depends(get_verified_user)
):
- def get_filtered_models(models, user):
- filtered_models = []
- for model in models:
- if model.get("arena"):
- if has_access(
- user.id,
- type="read",
- access_control=model.get("info", {})
- .get("meta", {})
- .get("access_control", {}),
- ):
- filtered_models.append(model)
- continue
-
- model_info = Models.get_model_by_id(model["id"])
- if model_info:
- if (
- (user.role == "admin" and BYPASS_ADMIN_ACCESS_CONTROL)
- or user.id == model_info.user_id
- or has_access(
- user.id, type="read", access_control=model_info.access_control
- )
- ):
- filtered_models.append(model)
-
- return filtered_models
-
all_models = await get_all_models(request, refresh=refresh, user=user)
models = []
@@ -1353,12 +1326,7 @@ async def get_models(
)
)
- # Filter out models that the user does not have access to
- if (
- user.role == "user"
- or (user.role == "admin" and not BYPASS_ADMIN_ACCESS_CONTROL)
- ) and not BYPASS_MODEL_ACCESS_CONTROL:
- models = get_filtered_models(models, user)
+ models = get_filtered_models(models, user)
log.debug(
f"/api/models returned filtered models accessible to the user: {json.dumps([model.get('id') for model in models])}"
@@ -1730,6 +1698,14 @@ async def get_app_config(request: Request):
"enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
"enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
"enable_onedrive_integration": app.state.config.ENABLE_ONEDRIVE_INTEGRATION,
+ **(
+ {
+ "enable_onedrive_personal": ENABLE_ONEDRIVE_PERSONAL,
+ "enable_onedrive_business": ENABLE_ONEDRIVE_BUSINESS,
+ }
+ if app.state.config.ENABLE_ONEDRIVE_INTEGRATION
+ else {}
+ ),
}
if user is not None
else {}
diff --git a/backend/open_webui/models/messages.py b/backend/open_webui/models/messages.py
index a27ae52519..ff4553ee9d 100644
--- a/backend/open_webui/models/messages.py
+++ b/backend/open_webui/models/messages.py
@@ -201,8 +201,14 @@ class MessageTable:
with get_db() as db:
message = db.get(Message, id)
message.content = form_data.content
- message.data = form_data.data
- message.meta = form_data.meta
+ message.data = {
+ **(message.data if message.data else {}),
+ **(form_data.data if form_data.data else {}),
+ }
+ message.meta = {
+ **(message.meta if message.meta else {}),
+ **(form_data.meta if form_data.meta else {}),
+ }
message.updated_at = int(time.time_ns())
db.commit()
db.refresh(message)
diff --git a/backend/open_webui/models/users.py b/backend/open_webui/models/users.py
index 620a746eed..05000744dd 100644
--- a/backend/open_webui/models/users.py
+++ b/backend/open_webui/models/users.py
@@ -107,11 +107,21 @@ class UserInfoResponse(BaseModel):
role: str
+class UserIdNameResponse(BaseModel):
+ id: str
+ name: str
+
+
class UserInfoListResponse(BaseModel):
users: list[UserInfoResponse]
total: int
+class UserIdNameListResponse(BaseModel):
+ users: list[UserIdNameResponse]
+ total: int
+
+
class UserResponse(BaseModel):
id: str
name: str
@@ -210,7 +220,7 @@ class UsersTable:
filter: Optional[dict] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
- ) -> UserListResponse:
+ ) -> dict:
with get_db() as db:
query = db.query(User)
diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py
index f5db7521b5..aec8de6846 100644
--- a/backend/open_webui/retrieval/utils.py
+++ b/backend/open_webui/retrieval/utils.py
@@ -621,6 +621,7 @@ def get_sources_from_items(
if knowledge_base and (
user.role == "admin"
+ or knowledge_base.user_id == user.id
or has_access(user.id, "read", knowledge_base.access_control)
):
diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py
index c4a187b50d..100610a83a 100644
--- a/backend/open_webui/routers/audio.py
+++ b/backend/open_webui/routers/audio.py
@@ -550,7 +550,7 @@ def transcription_handler(request, file_path, metadata):
metadata = metadata or {}
languages = [
- metadata.get("language", None) if WHISPER_LANGUAGE == "" else WHISPER_LANGUAGE,
+ metadata.get("language", None) if not WHISPER_LANGUAGE else WHISPER_LANGUAGE,
None, # Always fallback to None in case transcription fails
]
diff --git a/backend/open_webui/routers/channels.py b/backend/open_webui/routers/channels.py
index cf3603c6ff..da52be6e79 100644
--- a/backend/open_webui/routers/channels.py
+++ b/backend/open_webui/routers/channels.py
@@ -24,9 +24,17 @@ from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
+from open_webui.utils.models import (
+ get_all_models,
+ get_filtered_models,
+)
+from open_webui.utils.chat import generate_chat_completion
+
+
from open_webui.utils.auth import get_admin_user, get_verified_user
from open_webui.utils.access_control import has_access, get_users_with_access
from open_webui.utils.webhook import post_webhook
+from open_webui.utils.channels import extract_mentions, replace_mentions
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -200,14 +208,11 @@ async def send_notification(name, webui_url, channel, message, active_user_ids):
users = get_users_with_access("read", channel.access_control)
for user in users:
- if user.id in active_user_ids:
- continue
- else:
+ if user.id not in active_user_ids:
if user.settings:
webhook_url = user.settings.ui.get("notifications", {}).get(
"webhook_url", None
)
-
if webhook_url:
await post_webhook(
name,
@@ -221,14 +226,134 @@ async def send_notification(name, webui_url, channel, message, active_user_ids):
},
)
+ return True
-@router.post("/{id}/messages/post", response_model=Optional[MessageModel])
-async def post_new_message(
- request: Request,
- id: str,
- form_data: MessageForm,
- background_tasks: BackgroundTasks,
- user=Depends(get_verified_user),
+
+async def model_response_handler(request, channel, message, user):
+ MODELS = {
+ model["id"]: model
+ for model in get_filtered_models(await get_all_models(request, user=user), user)
+ }
+
+ mentions = extract_mentions(message.content)
+ message_content = replace_mentions(message.content)
+
+ # check if any of the mentions are models
+ model_mentions = [mention for mention in mentions if mention["id_type"] == "M"]
+ if not model_mentions:
+ return False
+
+ for mention in model_mentions:
+ model_id = mention["id"]
+ model = MODELS.get(model_id, None)
+
+ if model:
+ try:
+ # reverse to get in chronological order
+ thread_messages = Messages.get_messages_by_parent_id(
+ channel.id,
+ message.parent_id if message.parent_id else message.id,
+ )[::-1]
+
+ response_message, channel = await new_message_handler(
+ request,
+ channel.id,
+ MessageForm(
+ **{
+ "parent_id": (
+ message.parent_id if message.parent_id else message.id
+ ),
+ "content": f"",
+ "data": {},
+ "meta": {
+ "model_id": model_id,
+ "model_name": model.get("name", model_id),
+ },
+ }
+ ),
+ user,
+ )
+
+ thread_history = []
+ message_users = {}
+
+ for thread_message in thread_messages:
+ message_user = None
+ if thread_message.user_id not in message_users:
+ message_user = Users.get_user_by_id(thread_message.user_id)
+ message_users[thread_message.user_id] = message_user
+ else:
+ message_user = message_users[thread_message.user_id]
+
+ if thread_message.meta and thread_message.meta.get(
+ "model_id", None
+ ):
+ # If the message was sent by a model, use the model name
+ message_model_id = thread_message.meta.get("model_id", None)
+ message_model = MODELS.get(message_model_id, None)
+ username = (
+ message_model.get("name", message_model_id)
+ if message_model
+ else message_model_id
+ )
+ else:
+ username = message_user.name if message_user else "Unknown"
+
+ thread_history.append(
+ f"{username}: {replace_mentions(thread_message.content)}"
+ )
+
+ system_message = {
+ "role": "system",
+ "content": f"You are {model.get('name', model_id)}, an AI assistant participating in a threaded conversation. Be helpful, concise, and conversational."
+ + (
+ f"Here's the thread history:\n\n{''.join([f'{msg}' for msg in thread_history])}\n\nContinue the conversation naturally, addressing the most recent message while being aware of the full context."
+ if thread_history
+ else ""
+ ),
+ }
+
+ form_data = {
+ "model": model_id,
+ "messages": [
+ system_message,
+ {
+ "role": "user",
+ "content": f"{user.name if user else 'User'}: {message_content}",
+ },
+ ],
+ "stream": False,
+ }
+
+ res = await generate_chat_completion(
+ request,
+ form_data=form_data,
+ user=user,
+ )
+
+ if res:
+ await update_message_by_id(
+ channel.id,
+ response_message.id,
+ MessageForm(
+ **{
+ "content": res["choices"][0]["message"]["content"],
+ "meta": {
+ "done": True,
+ },
+ }
+ ),
+ user,
+ )
+ except Exception as e:
+ log.info(e)
+ pass
+
+ return True
+
+
+async def new_message_handler(
+ request: Request, id: str, form_data: MessageForm, user=Depends(get_verified_user)
):
channel = Channels.get_channel_by_id(id)
if not channel:
@@ -302,11 +427,30 @@ async def post_new_message(
},
to=f"channel:{channel.id}",
)
+ return MessageModel(**message.model_dump()), channel
+ except Exception as e:
+ log.exception(e)
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
+ )
- active_user_ids = get_user_ids_from_room(f"channel:{channel.id}")
- background_tasks.add_task(
- send_notification,
+@router.post("/{id}/messages/post", response_model=Optional[MessageModel])
+async def post_new_message(
+ request: Request,
+ id: str,
+ form_data: MessageForm,
+ background_tasks: BackgroundTasks,
+ user=Depends(get_verified_user),
+):
+
+ try:
+ message, channel = await new_message_handler(request, id, form_data, user)
+ active_user_ids = get_user_ids_from_room(f"channel:{channel.id}")
+
+ async def background_handler():
+ await model_response_handler(request, channel, message, user)
+ await send_notification(
request.app.state.WEBUI_NAME,
request.app.state.config.WEBUI_URL,
channel,
@@ -314,7 +458,12 @@ async def post_new_message(
active_user_ids,
)
- return MessageModel(**message.model_dump())
+ background_tasks.add_task(background_handler)
+
+ return message
+
+ except HTTPException as e:
+ raise e
except Exception as e:
log.exception(e)
raise HTTPException(
diff --git a/backend/open_webui/routers/chats.py b/backend/open_webui/routers/chats.py
index 6f853ab266..847368412e 100644
--- a/backend/open_webui/routers/chats.py
+++ b/backend/open_webui/routers/chats.py
@@ -166,7 +166,7 @@ async def import_chat(form_data: ChatImportForm, user=Depends(get_verified_user)
@router.get("/search", response_model=list[ChatTitleIdResponse])
-async def search_user_chats(
+def search_user_chats(
text: str, page: Optional[int] = None, user=Depends(get_verified_user)
):
if page is None:
diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py
index 778fbdec27..84d8f841cf 100644
--- a/backend/open_webui/routers/files.py
+++ b/backend/open_webui/routers/files.py
@@ -120,11 +120,6 @@ def process_uploaded_file(request, file, file_path, file_item, file_metadata, us
f"File type {file.content_type} is not provided, but trying to process anyway"
)
process_file(request, ProcessFileForm(file_id=file_item.id), user=user)
-
- Files.update_file_data_by_id(
- file_item.id,
- {"status": "completed"},
- )
except Exception as e:
log.error(f"Error processing file: {file_item.id}")
Files.update_file_data_by_id(
diff --git a/backend/open_webui/routers/functions.py b/backend/open_webui/routers/functions.py
index 9f0651fd3f..202aa74ca4 100644
--- a/backend/open_webui/routers/functions.py
+++ b/backend/open_webui/routers/functions.py
@@ -148,6 +148,18 @@ async def sync_functions(
content=function.content,
)
+ if hasattr(function_module, "Valves") and function.valves:
+ Valves = function_module.Valves
+ try:
+ Valves(
+ **{k: v for k, v in function.valves.items() if v is not None}
+ )
+ except Exception as e:
+ log.exception(
+ f"Error validating valves for function {function.id}: {e}"
+ )
+ raise e
+
return Functions.sync_functions(user.id, form_data.functions)
except Exception as e:
log.exception(f"Failed to load a function: {e}")
diff --git a/backend/open_webui/routers/models.py b/backend/open_webui/routers/models.py
index a4d4e3668e..05d7c68006 100644
--- a/backend/open_webui/routers/models.py
+++ b/backend/open_webui/routers/models.py
@@ -1,4 +1,6 @@
from typing import Optional
+import io
+import base64
from open_webui.models.models import (
ModelForm,
@@ -10,12 +12,13 @@ from open_webui.models.models import (
from pydantic import BaseModel
from open_webui.constants import ERROR_MESSAGES
-from fastapi import APIRouter, Depends, HTTPException, Request, status
+from fastapi import APIRouter, Depends, HTTPException, Request, status, Response
+from fastapi.responses import FileResponse, StreamingResponse
from open_webui.utils.auth import get_admin_user, get_verified_user
from open_webui.utils.access_control import has_access, has_permission
-from open_webui.config import BYPASS_ADMIN_ACCESS_CONTROL
+from open_webui.config import BYPASS_ADMIN_ACCESS_CONTROL, STATIC_DIR
router = APIRouter()
@@ -129,6 +132,39 @@ async def get_model_by_id(id: str, user=Depends(get_verified_user)):
)
+###########################
+# GetModelProfileImage
+###########################
+
+
+@router.get("/model/profile/image")
+async def get_model_profile_image(id: str, user=Depends(get_verified_user)):
+ model = Models.get_model_by_id(id)
+ if model:
+ if model.meta.profile_image_url:
+ if model.meta.profile_image_url.startswith("http"):
+ return Response(
+ status_code=status.HTTP_302_FOUND,
+ headers={"Location": model.meta.profile_image_url},
+ )
+ elif model.meta.profile_image_url.startswith("data:image"):
+ try:
+ header, base64_data = model.meta.profile_image_url.split(",", 1)
+ image_data = base64.b64decode(base64_data)
+ image_buffer = io.BytesIO(image_data)
+
+ return StreamingResponse(
+ image_buffer,
+ media_type="image/png",
+ headers={"Content-Disposition": "inline; filename=image.png"},
+ )
+ except Exception as e:
+ pass
+ return FileResponse(f"{STATIC_DIR}/favicon.png")
+ else:
+ return FileResponse(f"{STATIC_DIR}/favicon.png")
+
+
############################
# ToggleModelById
############################
diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py
index 8dadf3523a..bf11ffa0dd 100644
--- a/backend/open_webui/routers/ollama.py
+++ b/backend/open_webui/routers/ollama.py
@@ -1694,25 +1694,27 @@ async def download_file_stream(
yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
if done:
- file.seek(0)
- chunk_size = 1024 * 1024 * 2
- hashed = calculate_sha256(file, chunk_size)
- file.seek(0)
+ file.close()
- url = f"{ollama_url}/api/blobs/sha256:{hashed}"
- response = requests.post(url, data=file)
+ with open(file_path, "rb") as file:
+ chunk_size = 1024 * 1024 * 2
+ hashed = calculate_sha256(file, chunk_size)
- if response.ok:
- res = {
- "done": done,
- "blob": f"sha256:{hashed}",
- "name": file_name,
- }
- os.remove(file_path)
+ url = f"{ollama_url}/api/blobs/sha256:{hashed}"
+ with requests.Session() as session:
+ response = session.post(url, data=file, timeout=30)
- yield f"data: {json.dumps(res)}\n\n"
- else:
- raise "Ollama: Could not create blob, Please try again."
+ if response.ok:
+ res = {
+ "done": done,
+ "blob": f"sha256:{hashed}",
+ "name": file_name,
+ }
+ os.remove(file_path)
+
+ yield f"data: {json.dumps(res)}\n\n"
+ else:
+ raise Exception("Ollama: Could not create blob, Please try again.")
# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 8d8883a02c..e8865b90a0 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -9,6 +9,8 @@ from aiocache import cached
import requests
from urllib.parse import quote
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
from fastapi import Depends, HTTPException, Request, APIRouter
from fastapi.responses import (
FileResponse,
@@ -119,7 +121,7 @@ def openai_reasoning_model_handler(payload):
return payload
-def get_headers_and_cookies(
+async def get_headers_and_cookies(
request: Request,
url,
key=None,
@@ -172,7 +174,7 @@ def get_headers_and_cookies(
oauth_token = None
try:
if request.cookies.get("oauth_session_id", None):
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
user.id,
request.cookies.get("oauth_session_id", None),
)
@@ -182,12 +184,30 @@ def get_headers_and_cookies(
if oauth_token:
token = f"{oauth_token.get('access_token', '')}"
+ elif auth_type in ("azure_ad", "microsoft_entra_id"):
+ token = get_microsoft_entra_id_access_token()
+
if token:
headers["Authorization"] = f"Bearer {token}"
return headers, cookies
+def get_microsoft_entra_id_access_token():
+ """
+ Get Microsoft Entra ID access token using DefaultAzureCredential for Azure OpenAI.
+ Returns the token string or None if authentication fails.
+ """
+ try:
+ token_provider = get_bearer_token_provider(
+ DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
+ )
+ return token_provider()
+ except Exception as e:
+ log.error(f"Error getting Microsoft Entra ID access token: {e}")
+ return None
+
+
##########################################
#
# API routes
@@ -285,7 +305,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
request.app.state.config.OPENAI_API_CONFIGS.get(url, {}), # Legacy support
)
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
@@ -550,7 +570,7 @@ async def get_models(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
@@ -636,14 +656,17 @@ async def verify_connection(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
if api_config.get("azure", False):
- headers["api-key"] = key
- api_version = api_config.get("api_version", "") or "2023-03-15-preview"
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+ api_version = api_config.get("api_version", "") or "2023-03-15-preview"
async with session.get(
url=f"{url}/openai/models?api-version={api_version}",
headers=headers,
@@ -878,14 +901,19 @@ async def generate_chat_completion(
convert_logit_bias_input_to_json(payload["logit_bias"])
)
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, metadata, user=user
)
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
request_url, payload = convert_to_azure_payload(url, payload, api_version)
- headers["api-key"] = key
+
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+
headers["api-version"] = api_version
request_url = f"{request_url}/chat/completions?api-version={api_version}"
else:
@@ -982,7 +1010,9 @@ async def embeddings(request: Request, form_data: dict, user):
session = None
streaming = False
- headers, cookies = get_headers_and_cookies(request, url, key, api_config, user=user)
+ headers, cookies = await get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
try:
session = aiohttp.ClientSession(trust_env=True)
r = await session.request(
@@ -1052,13 +1082,18 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
streaming = False
try:
- headers, cookies = get_headers_and_cookies(
+ headers, cookies = await get_headers_and_cookies(
request, url, key, api_config, user=user
)
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
- headers["api-key"] = key
+
+ # Only set api-key header if not using Azure Entra ID authentication
+ auth_type = api_config.get("auth_type", "bearer")
+ if auth_type not in ("azure_ad", "microsoft_entra_id"):
+ headers["api-key"] = key
+
headers["api-version"] = api_version
payload = json.loads(body)
diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py
index 1f32791ba6..0ddf824efa 100644
--- a/backend/open_webui/routers/retrieval.py
+++ b/backend/open_webui/routers/retrieval.py
@@ -1584,12 +1584,19 @@ def process_file(
},
)
+ Files.update_file_data_by_id(
+ file.id,
+ {"status": "completed"},
+ )
+
return {
"status": True,
"collection_name": collection_name,
"filename": file.filename,
"content": text_content,
}
+ else:
+ raise Exception("Error saving document to vector database")
except Exception as e:
raise e
diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py
index 5b331dce73..9a0f8c6aaf 100644
--- a/backend/open_webui/routers/users.py
+++ b/backend/open_webui/routers/users.py
@@ -18,6 +18,7 @@ from open_webui.models.users import (
UserModel,
UserListResponse,
UserInfoListResponse,
+ UserIdNameListResponse,
UserRoleUpdateForm,
Users,
UserSettings,
@@ -100,6 +101,23 @@ async def get_all_users(
return Users.get_users()
+@router.get("/search", response_model=UserIdNameListResponse)
+async def search_users(
+ query: Optional[str] = None,
+ user=Depends(get_verified_user),
+):
+ limit = PAGE_ITEM_COUNT
+
+ page = 1 # Always return the first page for search
+ skip = (page - 1) * limit
+
+ filter = {}
+ if query:
+ filter["query"] = query
+
+ return Users.get_users(filter=filter, skip=skip, limit=limit)
+
+
############################
# User Groups
############################
diff --git a/backend/open_webui/utils/access_control.py b/backend/open_webui/utils/access_control.py
index 1529773c44..6215a6ac22 100644
--- a/backend/open_webui/utils/access_control.py
+++ b/backend/open_webui/utils/access_control.py
@@ -130,9 +130,10 @@ def has_access(
# Get all users with access to a resource
def get_users_with_access(
type: str = "write", access_control: Optional[dict] = None
-) -> List[UserModel]:
+) -> list[UserModel]:
if access_control is None:
- return Users.get_users()
+ result = Users.get_users()
+ return result.get("users", [])
permission_access = access_control.get(type, {})
permitted_group_ids = permission_access.get("group_ids", [])
diff --git a/backend/open_webui/utils/channels.py b/backend/open_webui/utils/channels.py
new file mode 100644
index 0000000000..312b5ea24c
--- /dev/null
+++ b/backend/open_webui/utils/channels.py
@@ -0,0 +1,31 @@
+import re
+
+
+def extract_mentions(message: str, triggerChar: str = "@"):
+ # Escape triggerChar in case it's a regex special character
+ triggerChar = re.escape(triggerChar)
+ pattern = rf"<{triggerChar}([A-Z]):([^|>]+)"
+
+ matches = re.findall(pattern, message)
+ return [{"id_type": id_type, "id": id_value} for id_type, id_value in matches]
+
+
+def replace_mentions(message: str, triggerChar: str = "@", use_label: bool = True):
+ """
+ Replace mentions in the message with either their label (after the pipe `|`)
+ or their id if no label exists.
+
+ Example:
+ "<@M:gpt-4.1|GPT-4>" -> "GPT-4" (if use_label=True)
+ "<@M:gpt-4.1|GPT-4>" -> "gpt-4.1" (if use_label=False)
+ """
+ # Escape triggerChar
+ triggerChar = re.escape(triggerChar)
+
+ def replacer(match):
+ id_type, id_value, label = match.groups()
+ return label if use_label and label else id_value
+
+ # Regex captures: idType, id, optional label
+ pattern = rf"<{triggerChar}([A-Z]):([^|>]+)(?:\|([^>]+))?>"
+ return re.sub(pattern, replacer, message)
diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py
index 89e4304474..3cd7d3a6e8 100644
--- a/backend/open_webui/utils/middleware.py
+++ b/backend/open_webui/utils/middleware.py
@@ -20,6 +20,7 @@ from concurrent.futures import ThreadPoolExecutor
from fastapi import Request, HTTPException
+from fastapi.responses import HTMLResponse
from starlette.responses import Response, StreamingResponse, JSONResponse
@@ -818,7 +819,7 @@ async def process_chat_payload(request, form_data, user, metadata, model):
oauth_token = None
try:
if request.cookies.get("oauth_session_id", None):
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
user.id,
request.cookies.get("oauth_session_id", None),
)
@@ -1498,7 +1499,7 @@ async def process_chat_response(
oauth_token = None
try:
if request.cookies.get("oauth_session_id", None):
- oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ oauth_token = await request.app.state.oauth_manager.get_oauth_token(
user.id,
request.cookies.get("oauth_session_id", None),
)
@@ -1581,7 +1582,8 @@ async def process_chat_response(
break
if tool_result is not None:
- tool_calls_display_content = f'{tool_calls_display_content}Tool Executed
\nTool Executed
\nExecuting...
\n