diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d09d3bf8d..3294d80949 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,42 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.15] - 2025-06-16 + +### Added + +- 🖼️ **Global Image Compression Option**: Effortlessly set image compression globally so all image uploads and outputs are optimized, speeding up load times and saving bandwidth—perfect for teams dealing with large files or limited network resources. +- 🎤 **Custom Speech-to-Text Content-Type for Transcription**: Define custom content types for audio transcription, ensuring compatibility with diverse audio sources and unlocking smoother, more accurate transcriptions in advanced setups. +- 🗂️ **LDAP Group Synchronization (Experimental)**: Automatically sync user groups from your LDAP directory directly into Open WebUI for seamless enterprise access management—simplifies identity integration and governance across your organization. +- 📈 **OpenTelemetry Metrics via OTLP Exporter (Experimental)**: Gain enterprise-grade analytics and monitor your AI usage in real time with experimental OpenTelemetry Metrics support—connect to any OTLP-compatible backend for instant insights into performance, load, and user interactions. +- 🕰️ **See User Message Timestamps on Hover (Chat Bubble UI)**: Effortlessly check when any user message was sent by hovering over it in Chat Bubble mode—no more switching screens or digging through logs for context. +- 🗂️ **Leaderboard Sorting Options**: Sort the leaderboard directly in the UI for a clearer, more actionable view of top performers, models, or tools—making analysis and recognition quick and easy for teams. +- 🏆 **Evaluation Details Modal in Feedbacks and Leaderboard**: Dive deeper with new modals that display detailed evaluation information when reviewing feedbacks and leaderboard rankings—accelerates learning, progress tracking, and quality improvement. +- 🔄 **Support for Multiple Pages in External Document Loaders**: Effortlessly extract and work with content spanning multiple pages in external documents, giving you complete flexibility for in-depth research and document workflows. +- 🌐 **New Accessibility Enhancements Across the Interface**: Benefit from significant accessibility improvements—tab navigation, ARIA roles/labels, better high-contrast text/modes, accessible modals, and more—making Open WebUI more usable and equitable for everyone, including those using assistive technologies. +- ⚡ **Performance & Stability Upgrades Across Frontend and Backend**: Enjoy a smoother, more reliable experience with numerous behind-the-scenes optimizations and refactoring on both frontend and backend—resulting in faster load times, fewer errors, and even greater stability throughout your workflows. +- 🌏 **Updated and Expanded Localizations**: Enjoy improved, up-to-date translations for Finnish, German (now with model pinning features), Korean, Russian, Simplified Chinese, Spanish, and more—making every interaction smoother, clearer, and more intuitive for international users. + +### Fixed + +- 🦾 **Ollama Error Messages More Descriptive**: Receive clearer, more actionable error messages when something goes wrong with Ollama models—making troubleshooting and user support faster and more effective. 
+- 🌐 **Bypass Webloader Now Works as Expected**: Resolved an issue where the "bypass webloader" feature failed to function correctly, ensuring web search bypasses operate smoothly and reliably for lighter, faster query results. +- 🔍 **Prevent Redundant Documents in Citation List**: The expanded citation list no longer shows duplicate documents, offering a cleaner, easier-to-digest reference experience when reviewing sources in knowledge and research workflows. +- 🛡️ **Trusted Header Email Matching is Now Case-Insensitive**: Fixed a critical authentication issue where email case sensitivity could cause secure headers to mismatch, ensuring robust, seamless login and session management in all environments. +- ⚙️ **Direct Tool Server Input Accepts Empty Strings**: You can now submit direct tool server commands without unexpected errors when passing empty-string values, improving integration and automation efficiency. +- 📄 **Citation Page Number for Page 1 is Now Displayed**: Corrected an oversight where references for page 1 documents were missing the page number; citations are now always accurate and fully visible. +- 📒 **Notes Access Restored**: Fixed an issue where some users could not access their notes—everyone can now view and manage their notes reliably, ensuring seamless documentation and workflow continuity. +- 🛑 **OAuth Callback Double-Slash Issue Resolved**: Fixed rare cases where an extra slash in OAuth callbacks caused failed logins or redirects, making third-party login integrations more reliable. + +### Changed + +- 🔑 **Dedicated Permission for System Prompts**: System prompt access is now controlled by its own specific permission instead of being grouped with general chat controls, empowering admins with finer-grained management over who can view or modify system prompts for enhanced security and workflow customization. +- 🛠️ **YouTube Transcript API and python-pptx Updated**: Enjoy better performance, reliability, and broader compatibility thanks to underlying library upgrades—less friction with media-rich and presentation workflows. + +### Removed + +- 🗑️ **Console Logging Disabled in Production**: All 'console.log' and 'console.debug' statements are now disabled in production, guaranteeing improved security and cleaner browser logs for end users by removing extraneous technical output. 
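Editor's note: every feature listed above is wired to new environment variables introduced in `backend/open_webui/config.py` and `backend/open_webui/env.py` later in this diff. The sketch below is illustrative only; the values are assumptions rather than shipped defaults, and it also demonstrates the `fnmatch` wildcard matching that the transcription routes now apply to uploaded content types.

```python
# Illustrative sketch (not part of the diff): driving the new 0.6.15 knobs
# from the environment. Values are examples, not defaults.
import os
from fnmatch import fnmatch

example_env = {
    # Global image compression bounds for uploads (unset by default).
    "FILE_IMAGE_COMPRESSION_WIDTH": "1920",
    "FILE_IMAGE_COMPRESSION_HEIGHT": "1080",
    # Comma-separated allow-list for transcription uploads; when left empty
    # the routers fall back to ["audio/*", "video/webm"].
    "AUDIO_STT_SUPPORTED_CONTENT_TYPES": "audio/*,video/webm",
    # Experimental LDAP group sync (both flags default to "False").
    "ENABLE_LDAP_GROUP_MANAGEMENT": "true",
    "ENABLE_LDAP_GROUP_CREATION": "true",
    "LDAP_ATTRIBUTE_FOR_GROUPS": "memberOf",
    # ENABLE_OTEL is the pre-existing tracing switch; ENABLE_OTEL_METRICS is
    # new and additionally enables the OTLP metrics exporter.
    "ENABLE_OTEL": "true",
    "ENABLE_OTEL_METRICS": "true",
    "OTEL_EXPORTER_OTLP_ENDPOINT": "http://localhost:4317",
    # New dedicated permission for system prompt access (defaults to "True").
    "USER_PERMISSIONS_CHAT_SYSTEM_PROMPT": "false",
}
os.environ.update(example_env)

# The transcription routes match content types with fnmatch, so "audio/*"
# covers any audio MIME type while unrelated types are rejected.
allowed = os.environ["AUDIO_STT_SUPPORTED_CONTENT_TYPES"].split(",")
print(any(fnmatch("audio/mpeg", pattern) for pattern in allowed))  # True
print(any(fnmatch("video/mp4", pattern) for pattern in allowed))   # False
```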
+ ## [0.6.14] - 2025-06-10 ### Added diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index b48ba4f2e2..898ac1b594 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1077,6 +1077,10 @@ USER_PERMISSIONS_CHAT_CONTROLS = ( os.environ.get("USER_PERMISSIONS_CHAT_CONTROLS", "True").lower() == "true" ) +USER_PERMISSIONS_CHAT_SYSTEM_PROMPT = ( + os.environ.get("USER_PERMISSIONS_CHAT_SYSTEM_PROMPT", "True").lower() == "true" +) + USER_PERMISSIONS_CHAT_FILE_UPLOAD = ( os.environ.get("USER_PERMISSIONS_CHAT_FILE_UPLOAD", "True").lower() == "true" ) @@ -1162,6 +1166,7 @@ DEFAULT_USER_PERMISSIONS = { }, "chat": { "controls": USER_PERMISSIONS_CHAT_CONTROLS, + "system_prompt": USER_PERMISSIONS_CHAT_SYSTEM_PROMPT, "file_upload": USER_PERMISSIONS_CHAT_FILE_UPLOAD, "delete": USER_PERMISSIONS_CHAT_DELETE, "edit": USER_PERMISSIONS_CHAT_EDIT, @@ -2102,6 +2107,27 @@ RAG_FILE_MAX_SIZE = PersistentConfig( ), ) +FILE_IMAGE_COMPRESSION_WIDTH = PersistentConfig( + "FILE_IMAGE_COMPRESSION_WIDTH", + "file.image_compression_width", + ( + int(os.environ.get("FILE_IMAGE_COMPRESSION_WIDTH")) + if os.environ.get("FILE_IMAGE_COMPRESSION_WIDTH") + else None + ), +) + +FILE_IMAGE_COMPRESSION_HEIGHT = PersistentConfig( + "FILE_IMAGE_COMPRESSION_HEIGHT", + "file.image_compression_height", + ( + int(os.environ.get("FILE_IMAGE_COMPRESSION_HEIGHT")) + if os.environ.get("FILE_IMAGE_COMPRESSION_HEIGHT") + else None + ), +) + + RAG_ALLOWED_FILE_EXTENSIONS = PersistentConfig( "RAG_ALLOWED_FILE_EXTENSIONS", "rag.file.allowed_extensions", @@ -2901,6 +2927,18 @@ AUDIO_STT_MODEL = PersistentConfig( os.getenv("AUDIO_STT_MODEL", ""), ) +AUDIO_STT_SUPPORTED_CONTENT_TYPES = PersistentConfig( + "AUDIO_STT_SUPPORTED_CONTENT_TYPES", + "audio.stt.supported_content_types", + [ + content_type.strip() + for content_type in os.environ.get( + "AUDIO_STT_SUPPORTED_CONTENT_TYPES", "" + ).split(",") + if content_type.strip() + ], +) + AUDIO_STT_AZURE_API_KEY = PersistentConfig( "AUDIO_STT_AZURE_API_KEY", "audio.stt.azure.api_key", @@ -3075,3 +3113,22 @@ LDAP_VALIDATE_CERT = PersistentConfig( LDAP_CIPHERS = PersistentConfig( "LDAP_CIPHERS", "ldap.server.ciphers", os.environ.get("LDAP_CIPHERS", "ALL") ) + +# For LDAP Group Management +ENABLE_LDAP_GROUP_MANAGEMENT = PersistentConfig( + "ENABLE_LDAP_GROUP_MANAGEMENT", + "ldap.group.enable_management", + os.environ.get("ENABLE_LDAP_GROUP_MANAGEMENT", "False").lower() == "true", +) + +ENABLE_LDAP_GROUP_CREATION = PersistentConfig( + "ENABLE_LDAP_GROUP_CREATION", + "ldap.group.enable_creation", + os.environ.get("ENABLE_LDAP_GROUP_CREATION", "False").lower() == "true", +) + +LDAP_ATTRIBUTE_FOR_GROUPS = PersistentConfig( + "LDAP_ATTRIBUTE_FOR_GROUPS", + "ldap.server.attribute_for_groups", + os.environ.get("LDAP_ATTRIBUTE_FOR_GROUPS", "memberOf"), +) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 7601748376..0f7b5611f5 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -539,6 +539,7 @@ AUDIT_EXCLUDED_PATHS = [path.lstrip("/") for path in AUDIT_EXCLUDED_PATHS] #################################### ENABLE_OTEL = os.environ.get("ENABLE_OTEL", "False").lower() == "true" +ENABLE_OTEL_METRICS = os.environ.get("ENABLE_OTEL_METRICS", "False").lower() == "true" OTEL_EXPORTER_OTLP_ENDPOINT = os.environ.get( "OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317" ) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index b6f26a8278..544756a6e8 100644 --- a/backend/open_webui/main.py +++ 
b/backend/open_webui/main.py @@ -57,6 +57,8 @@ from open_webui.utils.logger import start_logger from open_webui.socket.main import ( app as socket_app, periodic_usage_pool_cleanup, + get_models_in_use, + get_active_user_ids, ) from open_webui.routers import ( audio, @@ -157,6 +159,7 @@ from open_webui.config import ( # Audio AUDIO_STT_ENGINE, AUDIO_STT_MODEL, + AUDIO_STT_SUPPORTED_CONTENT_TYPES, AUDIO_STT_OPENAI_API_BASE_URL, AUDIO_STT_OPENAI_API_KEY, AUDIO_STT_AZURE_API_KEY, @@ -208,6 +211,8 @@ from open_webui.config import ( RAG_ALLOWED_FILE_EXTENSIONS, RAG_FILE_MAX_COUNT, RAG_FILE_MAX_SIZE, + FILE_IMAGE_COMPRESSION_WIDTH, + FILE_IMAGE_COMPRESSION_HEIGHT, RAG_OPENAI_API_BASE_URL, RAG_OPENAI_API_KEY, RAG_AZURE_OPENAI_BASE_URL, @@ -349,6 +354,10 @@ from open_webui.config import ( LDAP_CA_CERT_FILE, LDAP_VALIDATE_CERT, LDAP_CIPHERS, + # LDAP Group Management + ENABLE_LDAP_GROUP_MANAGEMENT, + ENABLE_LDAP_GROUP_CREATION, + LDAP_ATTRIBUTE_FOR_GROUPS, # Misc ENV, CACHE_DIR, @@ -676,6 +685,11 @@ app.state.config.LDAP_CA_CERT_FILE = LDAP_CA_CERT_FILE app.state.config.LDAP_VALIDATE_CERT = LDAP_VALIDATE_CERT app.state.config.LDAP_CIPHERS = LDAP_CIPHERS +# For LDAP Group Management +app.state.config.ENABLE_LDAP_GROUP_MANAGEMENT = ENABLE_LDAP_GROUP_MANAGEMENT +app.state.config.ENABLE_LDAP_GROUP_CREATION = ENABLE_LDAP_GROUP_CREATION +app.state.config.LDAP_ATTRIBUTE_FOR_GROUPS = LDAP_ATTRIBUTE_FOR_GROUPS + app.state.AUTH_TRUSTED_EMAIL_HEADER = WEBUI_AUTH_TRUSTED_EMAIL_HEADER app.state.AUTH_TRUSTED_NAME_HEADER = WEBUI_AUTH_TRUSTED_NAME_HEADER @@ -701,9 +715,13 @@ app.state.config.TOP_K = RAG_TOP_K app.state.config.TOP_K_RERANKER = RAG_TOP_K_RERANKER app.state.config.RELEVANCE_THRESHOLD = RAG_RELEVANCE_THRESHOLD app.state.config.HYBRID_BM25_WEIGHT = RAG_HYBRID_BM25_WEIGHT + + app.state.config.ALLOWED_FILE_EXTENSIONS = RAG_ALLOWED_FILE_EXTENSIONS app.state.config.FILE_MAX_SIZE = RAG_FILE_MAX_SIZE app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT +app.state.config.FILE_IMAGE_COMPRESSION_WIDTH = FILE_IMAGE_COMPRESSION_WIDTH +app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT = FILE_IMAGE_COMPRESSION_HEIGHT app.state.config.RAG_FULL_CONTEXT = RAG_FULL_CONTEXT @@ -948,10 +966,12 @@ app.state.config.IMAGE_STEPS = IMAGE_STEPS # ######################################## -app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL -app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY app.state.config.STT_ENGINE = AUDIO_STT_ENGINE app.state.config.STT_MODEL = AUDIO_STT_MODEL +app.state.config.STT_SUPPORTED_CONTENT_TYPES = AUDIO_STT_SUPPORTED_CONTENT_TYPES + +app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL +app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY app.state.config.WHISPER_MODEL = WHISPER_MODEL app.state.config.WHISPER_VAD_FILTER = WHISPER_VAD_FILTER @@ -1362,6 +1382,17 @@ async def chat_completion( request, response, form_data, user, metadata, model, events, tasks ) except Exception as e: + log.debug(f"Error in chat completion: {e}") + if metadata.get("chat_id") and metadata.get("message_id"): + # Update the chat message with the error + Chats.upsert_message_to_chat_by_id_and_message_id( + metadata["chat_id"], + metadata["message_id"], + { + "error": {"content": str(e)}, + }, + ) + raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e), @@ -1533,6 +1564,10 @@ async def get_app_config(request: Request): "file": { "max_size": app.state.config.FILE_MAX_SIZE, "max_count": app.state.config.FILE_MAX_COUNT, + "image_compression": { + 
"width": app.state.config.FILE_IMAGE_COMPRESSION_WIDTH, + "height": app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT, + }, }, "permissions": {**app.state.config.USER_PERMISSIONS}, "google_drive": { @@ -1618,6 +1653,19 @@ async def get_app_changelog(): return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5} +@app.get("/api/usage") +async def get_current_usage(user=Depends(get_verified_user)): + """ + Get current usage statistics for Open WebUI. + This is an experimental endpoint and subject to change. + """ + try: + return {"model_ids": get_models_in_use(), "user_ids": get_active_user_ids()} + except Exception as e: + log.error(f"Error getting usage statistics: {e}") + raise HTTPException(status_code=500, detail="Internal Server Error") + + ############################ # OAuth Login & Callback ############################ diff --git a/backend/open_webui/models/groups.py b/backend/open_webui/models/groups.py index df79284cfa..096041e40f 100644 --- a/backend/open_webui/models/groups.py +++ b/backend/open_webui/models/groups.py @@ -207,9 +207,39 @@ class GroupTable: except Exception: return False - def sync_user_groups_by_group_names( + def create_groups_by_group_names( self, user_id: str, group_names: list[str] - ) -> bool: + ) -> list[GroupModel]: + + # check for existing groups + existing_groups = self.get_groups() + existing_group_names = {group.name for group in existing_groups} + + new_groups = [] + + with get_db() as db: + for group_name in group_names: + if group_name not in existing_group_names: + new_group = GroupModel( + id=str(uuid.uuid4()), + user_id=user_id, + name=group_name, + description="", + created_at=int(time.time()), + updated_at=int(time.time()), + ) + try: + result = Group(**new_group.model_dump()) + db.add(result) + db.commit() + db.refresh(result) + new_groups.append(GroupModel.model_validate(result)) + except Exception as e: + log.exception(e) + continue + return new_groups + + def sync_groups_by_group_names(self, user_id: str, group_names: list[str]) -> bool: with get_db() as db: try: groups = db.query(Group).filter(Group.name.in_(group_names)).all() diff --git a/backend/open_webui/retrieval/loaders/external_document.py b/backend/open_webui/retrieval/loaders/external_document.py index 6119da3791..c0ccd72432 100644 --- a/backend/open_webui/retrieval/loaders/external_document.py +++ b/backend/open_webui/retrieval/loaders/external_document.py @@ -1,5 +1,5 @@ import requests -import logging +import logging, os from typing import Iterator, List, Union from langchain_core.document_loaders import BaseLoader @@ -25,7 +25,7 @@ class ExternalDocumentLoader(BaseLoader): self.file_path = file_path self.mime_type = mime_type - def load(self) -> list[Document]: + def load(self) -> List[Document]: with open(self.file_path, "rb") as f: data = f.read() @@ -36,23 +36,48 @@ class ExternalDocumentLoader(BaseLoader): if self.api_key is not None: headers["Authorization"] = f"Bearer {self.api_key}" + try: + headers["X-Filename"] = os.path.basename(self.file_path) + except: + pass + url = self.url if url.endswith("/"): url = url[:-1] - r = requests.put(f"{url}/process", data=data, headers=headers) + try: + response = requests.put(f"{url}/process", data=data, headers=headers) + except Exception as e: + log.error(f"Error connecting to endpoint: {e}") + raise Exception(f"Error connecting to endpoint: {e}") - if r.ok: - res = r.json() + if response.ok: + + response_data = response.json() + if response_data: + if isinstance(response_data, dict): + return [ + Document( + 
page_content=response_data.get("page_content"), + metadata=response_data.get("metadata"), + ) + ] + elif isinstance(response_data, list): + documents = [] + for document in response_data: + documents.append( + Document( + page_content=document.get("page_content"), + metadata=document.get("metadata"), + ) + ) + return documents + else: + raise Exception("Error loading document: Unable to parse content") - if res: - return [ - Document( - page_content=res.get("page_content"), - metadata=res.get("metadata"), - ) - ] else: raise Exception("Error loading document: No content returned") else: - raise Exception(f"Error loading document: {r.status_code} {r.text}") + raise Exception( + f"Error loading document: {response.status_code} {response.text}" + ) diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index fd1f606761..8ac878fc22 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -162,15 +162,15 @@ class DoclingLoader: if picture_description_mode == "local" and self.params.get( "picture_description_local", {} ): - params["picture_description_local"] = self.params.get( - "picture_description_local", {} + params["picture_description_local"] = json.dumps( + self.params.get("picture_description_local", {}) ) elif picture_description_mode == "api" and self.params.get( "picture_description_api", {} ): - params["picture_description_api"] = self.params.get( - "picture_description_api", {} + params["picture_description_api"] = json.dumps( + self.params.get("picture_description_api", {}) ) if self.params.get("ocr_engine") and self.params.get("ocr_lang"): diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index eac5839d96..27634cec19 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -10,7 +10,7 @@ from pydub.silence import split_on_silence from concurrent.futures import ThreadPoolExecutor from typing import Optional - +from fnmatch import fnmatch import aiohttp import aiofiles import requests @@ -168,6 +168,7 @@ class STTConfigForm(BaseModel): OPENAI_API_KEY: str ENGINE: str MODEL: str + SUPPORTED_CONTENT_TYPES: list[str] = [] WHISPER_MODEL: str DEEPGRAM_API_KEY: str AZURE_API_KEY: str @@ -202,6 +203,7 @@ async def get_audio_config(request: Request, user=Depends(get_admin_user)): "OPENAI_API_KEY": request.app.state.config.STT_OPENAI_API_KEY, "ENGINE": request.app.state.config.STT_ENGINE, "MODEL": request.app.state.config.STT_MODEL, + "SUPPORTED_CONTENT_TYPES": request.app.state.config.STT_SUPPORTED_CONTENT_TYPES, "WHISPER_MODEL": request.app.state.config.WHISPER_MODEL, "DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY, "AZURE_API_KEY": request.app.state.config.AUDIO_STT_AZURE_API_KEY, @@ -236,6 +238,10 @@ async def update_audio_config( request.app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY request.app.state.config.STT_ENGINE = form_data.stt.ENGINE request.app.state.config.STT_MODEL = form_data.stt.MODEL + request.app.state.config.STT_SUPPORTED_CONTENT_TYPES = ( + form_data.stt.SUPPORTED_CONTENT_TYPES + ) + request.app.state.config.WHISPER_MODEL = form_data.stt.WHISPER_MODEL request.app.state.config.DEEPGRAM_API_KEY = form_data.stt.DEEPGRAM_API_KEY request.app.state.config.AUDIO_STT_AZURE_API_KEY = form_data.stt.AZURE_API_KEY @@ -250,6 +256,8 @@ async def update_audio_config( request.app.state.faster_whisper_model = set_faster_whisper_model( form_data.stt.WHISPER_MODEL, 
WHISPER_MODEL_AUTO_UPDATE ) + else: + request.app.state.faster_whisper_model = None return { "tts": { @@ -269,6 +277,7 @@ async def update_audio_config( "OPENAI_API_KEY": request.app.state.config.STT_OPENAI_API_KEY, "ENGINE": request.app.state.config.STT_ENGINE, "MODEL": request.app.state.config.STT_MODEL, + "SUPPORTED_CONTENT_TYPES": request.app.state.config.STT_SUPPORTED_CONTENT_TYPES, "WHISPER_MODEL": request.app.state.config.WHISPER_MODEL, "DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY, "AZURE_API_KEY": request.app.state.config.AUDIO_STT_AZURE_API_KEY, @@ -628,7 +637,7 @@ def transcription_handler(request, file_path, metadata): # Make request to Deepgram API r = requests.post( - "https://api.deepgram.com/v1/listen", + "https://api.deepgram.com/v1/listen?smart_format=true", headers=headers, params=params, data=file_data, @@ -910,10 +919,14 @@ def transcription( ): log.info(f"file.content_type: {file.content_type}") - SUPPORTED_CONTENT_TYPES = {"video/webm"} # Extend if you add more video types! - if not ( - file.content_type.startswith("audio/") - or file.content_type in SUPPORTED_CONTENT_TYPES + supported_content_types = request.app.state.config.STT_SUPPORTED_CONTENT_TYPES or [ + "audio/*", + "video/webm", + ] + + if not any( + fnmatch(file.content_type, content_type) + for content_type in supported_content_types ): raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, diff --git a/backend/open_webui/routers/auths.py b/backend/open_webui/routers/auths.py index 06e506228a..60a12db4b3 100644 --- a/backend/open_webui/routers/auths.py +++ b/backend/open_webui/routers/auths.py @@ -55,9 +55,8 @@ from typing import Optional, List from ssl import CERT_NONE, CERT_REQUIRED, PROTOCOL_TLS -if ENABLE_LDAP.value: - from ldap3 import Server, Connection, NONE, Tls - from ldap3.utils.conv import escape_filter_chars +from ldap3 import Server, Connection, NONE, Tls +from ldap3.utils.conv import escape_filter_chars router = APIRouter() @@ -229,14 +228,30 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm): if not connection_app.bind(): raise HTTPException(400, detail="Application account bind failed") + ENABLE_LDAP_GROUP_MANAGEMENT = ( + request.app.state.config.ENABLE_LDAP_GROUP_MANAGEMENT + ) + ENABLE_LDAP_GROUP_CREATION = request.app.state.config.ENABLE_LDAP_GROUP_CREATION + LDAP_ATTRIBUTE_FOR_GROUPS = request.app.state.config.LDAP_ATTRIBUTE_FOR_GROUPS + + search_attributes = [ + f"{LDAP_ATTRIBUTE_FOR_USERNAME}", + f"{LDAP_ATTRIBUTE_FOR_MAIL}", + "cn", + ] + + if ENABLE_LDAP_GROUP_MANAGEMENT: + search_attributes.append(f"{LDAP_ATTRIBUTE_FOR_GROUPS}") + log.info( + f"LDAP Group Management enabled. 
Adding {LDAP_ATTRIBUTE_FOR_GROUPS} to search attributes" + ) + + log.info(f"LDAP search attributes: {search_attributes}") + search_success = connection_app.search( search_base=LDAP_SEARCH_BASE, search_filter=f"(&({LDAP_ATTRIBUTE_FOR_USERNAME}={escape_filter_chars(form_data.user.lower())}){LDAP_SEARCH_FILTERS})", - attributes=[ - f"{LDAP_ATTRIBUTE_FOR_USERNAME}", - f"{LDAP_ATTRIBUTE_FOR_MAIL}", - "cn", - ], + attributes=search_attributes, ) if not search_success or not connection_app.entries: @@ -259,6 +274,69 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm): cn = str(entry["cn"]) user_dn = entry.entry_dn + user_groups = [] + if ENABLE_LDAP_GROUP_MANAGEMENT and LDAP_ATTRIBUTE_FOR_GROUPS in entry: + group_dns = entry[LDAP_ATTRIBUTE_FOR_GROUPS] + log.info(f"LDAP raw group DNs for user {username}: {group_dns}") + + if group_dns: + log.info(f"LDAP group_dns original: {group_dns}") + log.info(f"LDAP group_dns type: {type(group_dns)}") + log.info(f"LDAP group_dns length: {len(group_dns)}") + + if hasattr(group_dns, "value"): + group_dns = group_dns.value + log.info(f"Extracted .value property: {group_dns}") + elif hasattr(group_dns, "__iter__") and not isinstance( + group_dns, (str, bytes) + ): + group_dns = list(group_dns) + log.info(f"Converted to list: {group_dns}") + + if isinstance(group_dns, list): + group_dns = [str(item) for item in group_dns] + else: + group_dns = [str(group_dns)] + + log.info( + f"LDAP group_dns after processing - type: {type(group_dns)}, length: {len(group_dns)}" + ) + + for group_idx, group_dn in enumerate(group_dns): + group_dn = str(group_dn) + log.info(f"Processing group DN #{group_idx + 1}: {group_dn}") + + try: + group_cn = None + + for item in group_dn.split(","): + item = item.strip() + if item.upper().startswith("CN="): + group_cn = item[3:] + break + + if group_cn: + user_groups.append(group_cn) + + else: + log.warning( + f"Could not extract CN from group DN: {group_dn}" + ) + except Exception as e: + log.warning( + f"Failed to extract group name from DN {group_dn}: {e}" + ) + + log.info( + f"LDAP groups for user {username}: {user_groups} (total: {len(user_groups)})" + ) + else: + log.info(f"No groups found for user {username}") + elif ENABLE_LDAP_GROUP_MANAGEMENT: + log.warning( + f"LDAP Group Management enabled but {LDAP_ATTRIBUTE_FOR_GROUPS} attribute not found in user entry" + ) + if username == form_data.user.lower(): connection_user = Connection( server, @@ -334,6 +412,22 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm): user.id, request.app.state.config.USER_PERMISSIONS ) + if ( + user.role != "admin" + and ENABLE_LDAP_GROUP_MANAGEMENT + and user_groups + ): + if ENABLE_LDAP_GROUP_CREATION: + Groups.create_groups_by_group_names(user.id, user_groups) + + try: + Groups.sync_groups_by_group_names(user.id, user_groups) + log.info( + f"Successfully synced groups for user {user.id}: {user_groups}" + ) + except Exception as e: + log.error(f"Failed to sync groups for user {user.id}: {e}") + return { "token": token, "token_type": "Bearer", @@ -386,7 +480,7 @@ async def signin(request: Request, response: Response, form_data: SigninForm): group_names = [name.strip() for name in group_names if name.strip()] if group_names: - Groups.sync_user_groups_by_group_names(user.id, group_names) + Groups.sync_groups_by_group_names(user.id, group_names) elif WEBUI_AUTH == False: admin_email = "admin@localhost" diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py index 
ba6758671e..b9bb15c7b4 100644 --- a/backend/open_webui/routers/files.py +++ b/backend/open_webui/routers/files.py @@ -155,9 +155,18 @@ def upload_file( if process: try: if file.content_type: - if file.content_type.startswith("audio/") or file.content_type in { - "video/webm" - }: + stt_supported_content_types = ( + request.app.state.config.STT_SUPPORTED_CONTENT_TYPES + or [ + "audio/*", + "video/webm", + ] + ) + + if any( + fnmatch(file.content_type, content_type) + for content_type in stt_supported_content_types + ): file_path = Storage.get_file(file_path) result = transcribe(request, file_path, file_metadata) diff --git a/backend/open_webui/routers/notes.py b/backend/open_webui/routers/notes.py index 94f8325d70..2cbbd331b5 100644 --- a/backend/open_webui/routers/notes.py +++ b/backend/open_webui/routers/notes.py @@ -124,9 +124,9 @@ async def get_note_by_id(request: Request, id: str, user=Depends(get_verified_us status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND ) - if user.role != "admin" or ( + if user.role != "admin" and ( user.id != note.user_id - and not has_access(user.id, type="read", access_control=note.access_control) + and (not has_access(user.id, type="read", access_control=note.access_control)) ): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT() @@ -158,7 +158,7 @@ async def update_note_by_id( status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND ) - if user.role != "admin" or ( + if user.role != "admin" and ( user.id != note.user_id and not has_access(user.id, type="write", access_control=note.access_control) ): @@ -197,7 +197,7 @@ async def delete_note_by_id(request: Request, id: str, user=Depends(get_verified status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND ) - if user.role != "admin" or ( + if user.role != "admin" and ( user.id != note.user_id and not has_access(user.id, type="write", access_control=note.access_control) ): diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index ea46a1cca7..1353599374 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -16,6 +16,8 @@ from urllib.parse import urlparse import aiohttp from aiocache import cached import requests + +from open_webui.models.chats import Chats from open_webui.models.users import UserModel from open_webui.env import ( @@ -147,8 +149,23 @@ async def send_post_request( }, ssl=AIOHTTP_CLIENT_SESSION_SSL, ) - r.raise_for_status() + if r.ok is False: + try: + res = await r.json() + await cleanup_response(r, session) + if "error" in res: + raise HTTPException(status_code=r.status, detail=res["error"]) + except HTTPException as e: + raise e # Re-raise HTTPException to be handled by FastAPI + except Exception as e: + log.error(f"Failed to parse error response: {e}") + raise HTTPException( + status_code=r.status, + detail=f"Open WebUI: Server Connection Error", + ) + + r.raise_for_status() # Raises an error for bad responses (4xx, 5xx) if stream: response_headers = dict(r.headers) @@ -168,20 +185,14 @@ async def send_post_request( await cleanup_response(r, session) return res + except HTTPException as e: + raise e # Re-raise HTTPException to be handled by FastAPI except Exception as e: - detail = None - - if r is not None: - try: - res = await r.json() - if "error" in res: - detail = f"Ollama: {res.get('error', 'Unknown error')}" - except Exception: - detail = f"Ollama: {e}" + detail = f"Ollama: {e}" raise HTTPException( status_code=r.status 
if r else 500, - detail=detail if detail else "Open WebUI: Server Connection Error", + detail=detail if e else "Open WebUI: Server Connection Error", ) diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 2bd73c25e3..ee6f99fbb5 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -432,6 +432,8 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): # File upload settings "FILE_MAX_SIZE": request.app.state.config.FILE_MAX_SIZE, "FILE_MAX_COUNT": request.app.state.config.FILE_MAX_COUNT, + "FILE_IMAGE_COMPRESSION_WIDTH": request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH, + "FILE_IMAGE_COMPRESSION_HEIGHT": request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT, "ALLOWED_FILE_EXTENSIONS": request.app.state.config.ALLOWED_FILE_EXTENSIONS, # Integration settings "ENABLE_GOOGLE_DRIVE_INTEGRATION": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, @@ -599,6 +601,8 @@ class ConfigForm(BaseModel): # File upload settings FILE_MAX_SIZE: Optional[int] = None FILE_MAX_COUNT: Optional[int] = None + FILE_IMAGE_COMPRESSION_WIDTH: Optional[int] = None + FILE_IMAGE_COMPRESSION_HEIGHT: Optional[int] = None ALLOWED_FILE_EXTENSIONS: Optional[List[str]] = None # Integration settings @@ -847,15 +851,13 @@ async def update_rag_config( ) # File upload settings - request.app.state.config.FILE_MAX_SIZE = ( - form_data.FILE_MAX_SIZE - if form_data.FILE_MAX_SIZE is not None - else request.app.state.config.FILE_MAX_SIZE + request.app.state.config.FILE_MAX_SIZE = form_data.FILE_MAX_SIZE + request.app.state.config.FILE_MAX_COUNT = form_data.FILE_MAX_COUNT + request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH = ( + form_data.FILE_IMAGE_COMPRESSION_WIDTH ) - request.app.state.config.FILE_MAX_COUNT = ( - form_data.FILE_MAX_COUNT - if form_data.FILE_MAX_COUNT is not None - else request.app.state.config.FILE_MAX_COUNT + request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT = ( + form_data.FILE_IMAGE_COMPRESSION_HEIGHT ) request.app.state.config.ALLOWED_FILE_EXTENSIONS = ( form_data.ALLOWED_FILE_EXTENSIONS @@ -1025,6 +1027,8 @@ async def update_rag_config( # File upload settings "FILE_MAX_SIZE": request.app.state.config.FILE_MAX_SIZE, "FILE_MAX_COUNT": request.app.state.config.FILE_MAX_COUNT, + "FILE_IMAGE_COMPRESSION_WIDTH": request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH, + "FILE_IMAGE_COMPRESSION_HEIGHT": request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT, "ALLOWED_FILE_EXTENSIONS": request.app.state.config.ALLOWED_FILE_EXTENSIONS, # Integration settings "ENABLE_GOOGLE_DRIVE_INTEGRATION": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, @@ -1867,6 +1871,10 @@ async def process_web_search( try: if request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER: + search_results = [ + item for result in search_results for item in result if result + ] + docs = [ Document( page_content=result.snippet, diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index 4046dc72d8..16cc2c375f 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -14,7 +14,11 @@ from open_webui.models.users import ( ) -from open_webui.socket.main import get_active_status_by_user_id +from open_webui.socket.main import ( + get_active_status_by_user_id, + get_active_user_ids, + get_user_active_status, +) from open_webui.constants import ERROR_MESSAGES from open_webui.env import SRC_LOG_LEVELS from fastapi import APIRouter, Depends, HTTPException, 
Request, status @@ -29,6 +33,24 @@ log.setLevel(SRC_LOG_LEVELS["MODELS"]) router = APIRouter() + +############################ +# GetActiveUsers +############################ + + +@router.get("/active") +async def get_active_users( + user=Depends(get_verified_user), +): + """ + Get a list of active users. + """ + return { + "user_ids": get_active_user_ids(), + } + + ############################ # GetUsers ############################ @@ -111,6 +133,7 @@ class SharingPermissions(BaseModel): class ChatPermissions(BaseModel): controls: bool = True + system_prompt: bool = True file_upload: bool = True delete: bool = True edit: bool = True @@ -303,6 +326,18 @@ async def get_user_by_id(user_id: str, user=Depends(get_verified_user)): ) +############################ +# GetUserActiveStatusById +############################ + + +@router.get("/{user_id}/active", response_model=dict) +async def get_user_active_status_by_id(user_id: str, user=Depends(get_verified_user)): + return { + "active": get_user_active_status(user_id), + } + + ############################ # UpdateUserById ############################ diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py index 09eccd8267..35e40dccb2 100644 --- a/backend/open_webui/socket/main.py +++ b/backend/open_webui/socket/main.py @@ -135,11 +135,6 @@ async def periodic_usage_pool_cleanup(): USAGE_POOL[model_id] = connections send_usage = True - - if send_usage: - # Emit updated usage information after cleaning - await sio.emit("usage", {"models": get_models_in_use()}) - await asyncio.sleep(TIMEOUT_DURATION) finally: release_func() @@ -157,6 +152,43 @@ def get_models_in_use(): return models_in_use +def get_active_user_ids(): + """Get the list of active user IDs.""" + return list(USER_POOL.keys()) + + +def get_user_active_status(user_id): + """Check if a user is currently active.""" + return user_id in USER_POOL + + +def get_user_id_from_session_pool(sid): + user = SESSION_POOL.get(sid) + if user: + return user["id"] + return None + + +def get_user_ids_from_room(room): + active_session_ids = sio.manager.get_participants( + namespace="/", + room=room, + ) + + active_user_ids = list( + set( + [SESSION_POOL.get(session_id[0])["id"] for session_id in active_session_ids] + ) + ) + return active_user_ids + + +def get_active_status_by_user_id(user_id): + if user_id in USER_POOL: + return True + return False + + @sio.on("usage") async def usage(sid, data): if sid in SESSION_POOL: @@ -170,9 +202,6 @@ async def usage(sid, data): sid: {"updated_at": current_time}, } - # Broadcast the usage data to all clients - await sio.emit("usage", {"models": get_models_in_use()}) - @sio.event async def connect(sid, environ, auth): @@ -190,10 +219,6 @@ async def connect(sid, environ, auth): else: USER_POOL[user.id] = [sid] - # print(f"user {user.name}({user.id}) connected with session ID {sid}") - await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())}) - await sio.emit("usage", {"models": get_models_in_use()}) - @sio.on("user-join") async def user_join(sid, data): @@ -221,10 +246,6 @@ async def user_join(sid, data): log.debug(f"{channels=}") for channel in channels: await sio.enter_room(sid, f"channel:{channel.id}") - - # print(f"user {user.name}({user.id}) connected with session ID {sid}") - - await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())}) return {"id": user.id, "name": user.name} @@ -277,12 +298,6 @@ async def channel_events(sid, data): ) -@sio.on("user-list") -async def user_list(sid): - if sid in SESSION_POOL: - await 
sio.emit("user-list", {"user_ids": list(USER_POOL.keys())}) - - @sio.event async def disconnect(sid): if sid in SESSION_POOL: @@ -294,8 +309,6 @@ async def disconnect(sid): if len(USER_POOL[user_id]) == 0: del USER_POOL[user_id] - - await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())}) else: pass # print(f"Unknown session ID {sid} disconnected") @@ -388,30 +401,3 @@ def get_event_call(request_info): get_event_caller = get_event_call - - -def get_user_id_from_session_pool(sid): - user = SESSION_POOL.get(sid) - if user: - return user["id"] - return None - - -def get_user_ids_from_room(room): - active_session_ids = sio.manager.get_participants( - namespace="/", - room=room, - ) - - active_user_ids = list( - set( - [SESSION_POOL.get(session_id[0])["id"] for session_id in active_session_ids] - ) - ) - return active_user_ids - - -def get_active_status_by_user_id(user_id): - if user_id in USER_POOL: - return True - return False diff --git a/backend/open_webui/static/apple-touch-icon.png b/backend/open_webui/static/apple-touch-icon.png index ece4b85dbc..9807373436 100644 Binary files a/backend/open_webui/static/apple-touch-icon.png and b/backend/open_webui/static/apple-touch-icon.png differ diff --git a/backend/open_webui/static/custom.css b/backend/open_webui/static/custom.css new file mode 100644 index 0000000000..e69de29bb2 diff --git a/backend/open_webui/utils/access_control.py b/backend/open_webui/utils/access_control.py index 1699cfaa7c..c93574527f 100644 --- a/backend/open_webui/utils/access_control.py +++ b/backend/open_webui/utils/access_control.py @@ -60,7 +60,7 @@ def get_permissions( # Combine permissions from all user groups for group in user_groups: - group_permissions = group.permissions + group_permissions = group.permissions or {} permissions = combine_permissions(permissions, group_permissions) # Ensure all fields from default_permissions are present and filled in diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index c8c1f13727..9befaf2a91 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -228,7 +228,9 @@ def get_current_user( ) else: if WEBUI_AUTH_TRUSTED_EMAIL_HEADER: - trusted_email = request.headers.get(WEBUI_AUTH_TRUSTED_EMAIL_HEADER) + trusted_email = request.headers.get( + WEBUI_AUTH_TRUSTED_EMAIL_HEADER, "" + ).lower() if trusted_email and user.email != trusted_email: # Delete the token cookie response.delete_cookie("token") diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 0106779a87..b1e69db264 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -697,7 +697,7 @@ def apply_params_to_form_data(form_data, model): # If custom_params are provided, merge them into params params = deep_update(params, custom_params) - if model.get("ollama"): + if model.get("owned_by") == "ollama": # Ollama specific parameters form_data["options"] = params else: @@ -1078,6 +1078,7 @@ async def process_chat_response( follow_ups = json.loads(follow_ups_string).get( "follow_ups", [] ) + Chats.upsert_message_to_chat_by_id_and_message_id( metadata["chat_id"], metadata["message_id"], @@ -1098,7 +1099,12 @@ async def process_chat_response( pass if TASKS.TITLE_GENERATION in tasks: + user_message = get_last_user_message(messages) + if user_message and len(user_message) > 100: + user_message = user_message[:100] + "..." 
+ if tasks[TASKS.TITLE_GENERATION]: + res = await generate_title( request, { @@ -1114,7 +1120,9 @@ async def process_chat_response( title_string = ( res.get("choices", [])[0] .get("message", {}) - .get("content", message.get("content", "New Chat")) + .get( + "content", message.get("content", user_message) + ) ) else: title_string = "" @@ -1125,13 +1133,13 @@ async def process_chat_response( try: title = json.loads(title_string).get( - "title", "New Chat" + "title", user_message ) except Exception as e: title = "" if not title: - title = messages[0].get("content", "New Chat") + title = messages[0].get("content", user_message) Chats.update_chat_title_by_id(metadata["chat_id"], title) @@ -1142,14 +1150,14 @@ async def process_chat_response( } ) elif len(messages) == 2: - title = messages[0].get("content", "New Chat") + title = messages[0].get("content", user_message) Chats.update_chat_title_by_id(metadata["chat_id"], title) await event_emitter( { "type": "chat:title", - "data": message.get("content", "New Chat"), + "data": message.get("content", user_message), } ) @@ -2053,28 +2061,38 @@ async def process_chat_response( tools = metadata.get("tools", {}) results = [] + for tool_call in response_tool_calls: tool_call_id = tool_call.get("id", "") tool_name = tool_call.get("function", {}).get("name", "") + tool_args = tool_call.get("function", {}).get("arguments", "{}") tool_function_params = {} try: # json.loads cannot be used because some models do not produce valid JSON - tool_function_params = ast.literal_eval( - tool_call.get("function", {}).get("arguments", "{}") - ) + tool_function_params = ast.literal_eval(tool_args) except Exception as e: log.debug(e) # Fallback to JSON parsing try: - tool_function_params = json.loads( - tool_call.get("function", {}).get("arguments", "{}") - ) + tool_function_params = json.loads(tool_args) except Exception as e: - log.debug( - f"Error parsing tool call arguments: {tool_call.get('function', {}).get('arguments', '{}')}" + log.error( + f"Error parsing tool call arguments: {tool_args}" ) + # Mutate the original tool call response params as they are passed back to the passed + # back to the LLM via the content blocks. If they are in a json block and are invalid json, + # this can cause downstream LLM integrations to fail (e.g. bedrock gateway) where response + # params are not valid json. + # Main case so far is no args = "" = invalid json. 
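Editor's aside: the argument-parsing fallback in the hunk above can be distilled into a short runnable sketch. The helper name is hypothetical, not part of the codebase; the point is that non-JSON model output (Python-literal dicts or an empty string) is coerced into a dict and re-serialized with `json.dumps`, so anything replayed to the LLM is valid JSON.

```python
# Illustrative sketch (not part of the diff) of the parse-and-normalize flow.
import ast
import json

def normalize_tool_arguments(raw: str) -> str:
    params = {}
    try:
        # Some models emit Python-literal dicts rather than JSON.
        params = ast.literal_eval(raw)
    except Exception:
        try:
            params = json.loads(raw)
        except Exception:
            # For example raw == "", which is not valid JSON: fall back
            # to an empty argument object.
            params = {}
    return json.dumps(params)

print(normalize_tool_arguments("{'city': 'Berlin'}"))   # {"city": "Berlin"}
print(normalize_tool_arguments(""))                     # {}
```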
+ log.debug( + f"Parsed args from {tool_args} to {tool_function_params}" + ) + tool_call.setdefault("function", {})["arguments"] = json.dumps( + tool_function_params + ) + tool_result = None if tool_name in tools: diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 6c98ed7dfa..2be9cda92a 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -537,8 +537,8 @@ class OAuthManager: ) # Redirect back to the frontend with the JWT token - redirect_base_url = request.app.state.config.WEBUI_URL or request.base_url - if isinstance(redirect_base_url, str) and redirect_base_url.endswith("/"): + redirect_base_url = str(request.app.state.config.WEBUI_URL or request.base_url) + if redirect_base_url.endswith("/"): redirect_base_url = redirect_base_url[:-1] redirect_url = f"{redirect_base_url}/auth#token={jwt_token}" diff --git a/backend/open_webui/utils/telemetry/metrics.py b/backend/open_webui/utils/telemetry/metrics.py new file mode 100644 index 0000000000..8a0298d091 --- /dev/null +++ b/backend/open_webui/utils/telemetry/metrics.py @@ -0,0 +1,110 @@ +"""OpenTelemetry metrics bootstrap for Open WebUI. + +This module initialises a MeterProvider that sends metrics to an OTLP +collector. The collector is responsible for exposing a Prometheus +`/metrics` endpoint – WebUI does **not** expose it directly. + +Metrics collected: + +* http.server.requests (counter) +* http.server.duration (histogram, milliseconds) + +Attributes used: http.method, http.route, http.status_code + +If you wish to add more attributes (e.g. user-agent) you can, but beware of +high-cardinality label sets. +""" + +from __future__ import annotations + +import time +from typing import Dict, List, Sequence, Any + +from fastapi import FastAPI, Request +from opentelemetry import metrics +from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( + OTLPMetricExporter, +) +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.metrics.view import View +from opentelemetry.sdk.metrics.export import ( + PeriodicExportingMetricReader, +) +from opentelemetry.sdk.resources import SERVICE_NAME, Resource + +from open_webui.env import OTEL_SERVICE_NAME, OTEL_EXPORTER_OTLP_ENDPOINT + + +_EXPORT_INTERVAL_MILLIS = 10_000 # 10 seconds + + +def _build_meter_provider() -> MeterProvider: + """Return a configured MeterProvider.""" + + # Periodic reader pushes metrics over OTLP/gRPC to collector + readers: List[PeriodicExportingMetricReader] = [ + PeriodicExportingMetricReader( + OTLPMetricExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT), + export_interval_millis=_EXPORT_INTERVAL_MILLIS, + ) + ] + + # Optional view to limit cardinality: drop user-agent etc. 
+ views: List[View] = [ + View( + instrument_name="http.server.duration", + attribute_keys=["http.method", "http.route", "http.status_code"], + ), + View( + instrument_name="http.server.requests", + attribute_keys=["http.method", "http.route", "http.status_code"], + ), + ] + + provider = MeterProvider( + resource=Resource.create({SERVICE_NAME: OTEL_SERVICE_NAME}), + metric_readers=list(readers), + views=views, + ) + return provider + + +def setup_metrics(app: FastAPI) -> None: + """Attach OTel metrics middleware to *app* and initialise provider.""" + + metrics.set_meter_provider(_build_meter_provider()) + meter = metrics.get_meter(__name__) + + # Instruments + request_counter = meter.create_counter( + name="http.server.requests", + description="Total HTTP requests", + unit="1", + ) + duration_histogram = meter.create_histogram( + name="http.server.duration", + description="HTTP request duration", + unit="ms", + ) + + # FastAPI middleware + @app.middleware("http") + async def _metrics_middleware(request: Request, call_next): + start_time = time.perf_counter() + response = await call_next(request) + elapsed_ms = (time.perf_counter() - start_time) * 1000.0 + + # Route template e.g. "/items/{item_id}" instead of real path. + route = request.scope.get("route") + route_path = getattr(route, "path", request.url.path) + + attrs: Dict[str, str | int] = { + "http.method": request.method, + "http.route": route_path, + "http.status_code": response.status_code, + } + + request_counter.add(1, attrs) + duration_histogram.record(elapsed_ms, attrs) + + return response diff --git a/backend/open_webui/utils/telemetry/setup.py b/backend/open_webui/utils/telemetry/setup.py index eb6a238c8d..62632cff52 100644 --- a/backend/open_webui/utils/telemetry/setup.py +++ b/backend/open_webui/utils/telemetry/setup.py @@ -7,7 +7,12 @@ from sqlalchemy import Engine from open_webui.utils.telemetry.exporters import LazyBatchSpanProcessor from open_webui.utils.telemetry.instrumentors import Instrumentor -from open_webui.env import OTEL_SERVICE_NAME, OTEL_EXPORTER_OTLP_ENDPOINT +from open_webui.utils.telemetry.metrics import setup_metrics +from open_webui.env import ( + OTEL_SERVICE_NAME, + OTEL_EXPORTER_OTLP_ENDPOINT, + ENABLE_OTEL_METRICS, +) def setup(app: FastAPI, db_engine: Engine): @@ -21,3 +26,7 @@ def setup(app: FastAPI, db_engine: Engine): exporter = OTLPSpanExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT) trace.get_tracer_provider().add_span_processor(LazyBatchSpanProcessor(exporter)) Instrumentor(app=app, db_engine=db_engine).instrument() + + # set up metrics only if enabled + if ENABLE_OTEL_METRICS: + setup_metrics(app) diff --git a/backend/open_webui/utils/tools.py b/backend/open_webui/utils/tools.py index 0774522dbd..dda2635ec7 100644 --- a/backend/open_webui/utils/tools.py +++ b/backend/open_webui/utils/tools.py @@ -479,7 +479,7 @@ async def get_tool_server_data(token: str, url: str) -> Dict[str, Any]: "specs": convert_openapi_to_tool_payload(res), } - log.info("Fetched data:", data) + log.info(f"Fetched data: {data}") return data @@ -644,5 +644,5 @@ async def execute_tool_server( except Exception as err: error = str(err) - log.exception("API Request Error:", error) + log.exception(f"API Request Error: {error}") return {"error": error} diff --git a/backend/requirements.txt b/backend/requirements.txt index c4f19b61b0..7f7bf82fee 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -66,7 +66,7 @@ pypdf==4.3.1 fpdf2==2.8.2 pymdown-extensions==10.14.2 docx2txt==0.8 -python-pptx==1.0.0 
+python-pptx==1.0.2 unstructured==0.16.17 nltk==3.9.1 Markdown==3.7 @@ -95,7 +95,7 @@ authlib==1.4.1 black==25.1.0 langfuse==2.44.0 -youtube-transcript-api==1.0.3 +youtube-transcript-api==1.1.0 pytube==15.0.0 extract_msg diff --git a/package-lock.json b/package-lock.json index 48835efcaf..d17e571808 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.6.14", + "version": "0.6.15", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.6.14", + "version": "0.6.15", "dependencies": { "@azure/msal-browser": "^4.5.0", "@codemirror/lang-javascript": "^6.2.2", diff --git a/package.json b/package.json index 3769edebe4..7f0d121be7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.6.14", + "version": "0.6.15", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", diff --git a/pyproject.toml b/pyproject.toml index 188827c738..9a964a8c9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,7 @@ dependencies = [ "fpdf2==2.8.2", "pymdown-extensions==10.14.2", "docx2txt==0.8", - "python-pptx==1.0.0", + "python-pptx==1.0.2", "unstructured==0.16.17", "nltk==3.9.1", "Markdown==3.7", @@ -102,7 +102,7 @@ dependencies = [ "black==25.1.0", "langfuse==2.44.0", - "youtube-transcript-api==1.0.3", + "youtube-transcript-api==1.1.0", "pytube==15.0.0", "extract_msg", diff --git a/src/app.html b/src/app.html index d19f3d227e..6fa5b79cee 100644 --- a/src/app.html +++ b/src/app.html @@ -24,6 +24,7 @@ href="/opensearch.xml" /> + + + + {#if selectedFeedback} +
+
+
+ {$i18n.t('Feedback Details')} +
+ +
+ +
+
+
+
{$i18n.t('Rating')}
+ +
+ {selectedFeedback?.data?.details?.rating ?? '-'} +
+
+
+
{$i18n.t('Reason')}
+ +
+ {selectedFeedback?.data?.reason || '-'} +
+
+ +
+ {#if selectedFeedback?.data?.tags && selectedFeedback?.data?.tags.length} +
+ {#each selectedFeedback?.data?.tags as tag} + {tag} + {/each} +
+ {:else} + - + {/if} +
+
+ +
+
+
+
+ {/if} +
diff --git a/src/lib/components/admin/Evaluations/Feedbacks.svelte b/src/lib/components/admin/Evaluations/Feedbacks.svelte index 726028664a..0dcf02e1c1 100644 --- a/src/lib/components/admin/Evaluations/Feedbacks.svelte +++ b/src/lib/components/admin/Evaluations/Feedbacks.svelte @@ -18,12 +18,19 @@ import CloudArrowUp from '$lib/components/icons/CloudArrowUp.svelte'; import Pagination from '$lib/components/common/Pagination.svelte'; import FeedbackMenu from './FeedbackMenu.svelte'; + import FeedbackModal from './FeedbackModal.svelte'; import EllipsisHorizontal from '$lib/components/icons/EllipsisHorizontal.svelte'; + import ChevronUp from '$lib/components/icons/ChevronUp.svelte'; + import ChevronDown from '$lib/components/icons/ChevronDown.svelte'; + export let feedbacks = []; let page = 1; - $: paginatedFeedbacks = feedbacks.slice((page - 1) * 10, page * 10); + $: paginatedFeedbacks = sortedFeedbacks.slice((page - 1) * 10, page * 10); + + let orderBy: string = 'updated_at'; + let direction: 'asc' | 'desc' = 'desc'; type Feedback = { id: string; @@ -48,6 +55,58 @@ lost: number; }; + function setSortKey(key: string) { + if (orderBy === key) { + direction = direction === 'asc' ? 'desc' : 'asc'; + } else { + orderBy = key; + if (key === 'user' || key === 'model_id') { + direction = 'asc'; + } else { + direction = 'desc'; + } + } + page = 1; + } + + $: sortedFeedbacks = [...feedbacks].sort((a, b) => { + let aVal, bVal; + + switch (orderBy) { + case 'user': + aVal = a.user?.name || ''; + bVal = b.user?.name || ''; + return direction === 'asc' ? aVal.localeCompare(bVal) : bVal.localeCompare(aVal); + case 'model_id': + aVal = a.data.model_id || ''; + bVal = b.data.model_id || ''; + return direction === 'asc' ? aVal.localeCompare(bVal) : bVal.localeCompare(aVal); + case 'rating': + aVal = a.data.rating; + bVal = b.data.rating; + return direction === 'asc' ? aVal - bVal : bVal - aVal; + case 'updated_at': + aVal = a.updated_at; + bVal = b.updated_at; + return direction === 'asc' ? aVal - bVal : bVal - aVal; + default: + return 0; + } + }); + + let showFeedbackModal = false; + let selectedFeedback = null; + + const openFeedbackModal = (feedback) => { + showFeedbackModal = true; + selectedFeedback = feedback; + }; + + const closeFeedbackModal = () => { + showFeedbackModal = false; + selectedFeedback = null; + }; + ////////////////////// // // CRUD operations @@ -106,6 +165,8 @@ }; + +
{$i18n.t('Feedback History')} @@ -146,20 +207,96 @@ class="text-xs text-gray-700 uppercase bg-gray-50 dark:bg-gray-850 dark:text-gray-400 -translate-y-0.5" > - - {$i18n.t('User')} + setSortKey('user')} + > +
+ {$i18n.t('User')} + {#if orderBy === 'user'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Models')} + setSortKey('model_id')} + > +
+ {$i18n.t('Models')} + {#if orderBy === 'model_id'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Result')} + setSortKey('rating')} + > +
+ {$i18n.t('Result')} + {#if orderBy === 'rating'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Updated At')} + setSortKey('updated_at')} + > +
+ {$i18n.t('Updated At')} + {#if orderBy === 'updated_at'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
@@ -167,7 +304,10 @@ {#each paginatedFeedbacks as feedback (feedback.id)} - + openFeedbackModal(feedback)} + >
diff --git a/src/lib/components/admin/Evaluations/Leaderboard.svelte b/src/lib/components/admin/Evaluations/Leaderboard.svelte index e5d8a21662..46daf21278 100644 --- a/src/lib/components/admin/Evaluations/Leaderboard.svelte +++ b/src/lib/components/admin/Evaluations/Leaderboard.svelte @@ -7,10 +7,15 @@ import { onMount, getContext } from 'svelte'; import { models } from '$lib/stores'; + import ModelModal from './LeaderboardModal.svelte'; + import Spinner from '$lib/components/common/Spinner.svelte'; import Tooltip from '$lib/components/common/Tooltip.svelte'; import MagnifyingGlass from '$lib/components/icons/MagnifyingGlass.svelte'; + import ChevronUp from '$lib/components/icons/ChevronUp.svelte'; + import ChevronDown from '$lib/components/icons/ChevronDown.svelte'; + const i18n = getContext('i18n'); const EMBEDDING_MODEL = 'TaylorAI/bge-micro-v2'; @@ -28,6 +33,9 @@ let loadingLeaderboard = true; let debounceTimer; + let orderBy: string = 'rating'; // default sort column + let direction: 'asc' | 'desc' = 'desc'; // default sort order + type Feedback = { id: string; data: { @@ -51,6 +59,34 @@ lost: number; }; + function setSortKey(key) { + if (orderBy === key) { + direction = direction === 'asc' ? 'desc' : 'asc'; + } else { + orderBy = key; + direction = key === 'name' ? 'asc' : 'desc'; + } + } + + ////////////////////// + // + // Aggregate Level Modal + // + ////////////////////// + + let showLeaderboardModal = false; + let selectedModel = null; + + const openFeedbackModal = (model) => { + showLeaderboardModal = true; + selectedModel = model; + }; + + const closeLeaderboardModal = () => { + showLeaderboardModal = false; + selectedModel = null; + }; + ////////////////////// // // Rank models by Elo rating @@ -266,8 +302,37 @@ onMount(async () => { rankHandler(); }); + + $: sortedModels = [...rankedModels].sort((a, b) => { + let aVal, bVal; + if (orderBy === 'name') { + aVal = a.name; + bVal = b.name; + return direction === 'asc' ? aVal.localeCompare(bVal) : bVal.localeCompare(aVal); + } else if (orderBy === 'rating') { + aVal = a.rating === '-' ? -Infinity : a.rating; + bVal = b.rating === '-' ? -Infinity : b.rating; + return direction === 'asc' ? aVal - bVal : bVal - aVal; + } else if (orderBy === 'won') { + aVal = a.stats.won === '-' ? -Infinity : Number(a.stats.won); + bVal = b.stats.won === '-' ? -Infinity : Number(b.stats.won); + return direction === 'asc' ? aVal - bVal : bVal - aVal; + } else if (orderBy === 'lost') { + aVal = a.stats.lost === '-' ? -Infinity : Number(a.stats.lost); + bVal = b.stats.lost === '-' ? -Infinity : Number(b.stats.lost); + return direction === 'asc' ? aVal - bVal : bVal - aVal; + } + return 0; + }); + +
@@ -324,26 +389,124 @@ class="text-xs text-gray-700 uppercase bg-gray-50 dark:bg-gray-850 dark:text-gray-400 -translate-y-0.5" > - - {$i18n.t('RK')} + setSortKey('rating')} + > +
+ {$i18n.t('RK')} + {#if orderBy === 'rating'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Model')} + setSortKey('name')} + > +
+ {$i18n.t('Model')} + {#if orderBy === 'name'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Rating')} + setSortKey('rating')} + > +
+ {$i18n.t('Rating')} + {#if orderBy === 'rating'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Won')} + setSortKey('won')} + > +
+ {$i18n.t('Won')} + {#if orderBy === 'won'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- - {$i18n.t('Lost')} + setSortKey('lost')} + > +
+ {$i18n.t('Lost')} + {#if orderBy === 'lost'} + + {#if direction === 'asc'} + + {:else} + + {/if} + + {:else} + + {/if} +
- {#each rankedModels as model, modelIdx (model.id)} - + {#each sortedModels as model, modelIdx (model.id)} + openFeedbackModal(model)} + >
 {model?.rating !== '-' ? modelIdx + 1 : '-'}
diff --git a/src/lib/components/admin/Evaluations/LeaderboardModal.svelte b/src/lib/components/admin/Evaluations/LeaderboardModal.svelte
new file mode 100644
index 0000000000..e90405630b
--- /dev/null
+++ b/src/lib/components/admin/Evaluations/LeaderboardModal.svelte
@@ -0,0 +1,77 @@
+
+
+
+	{#if model}
+
+
+ {model.name} +
+ +
+
+
+ {#if topTags.length} +
+ {#each topTags as tagInfo} + + {tagInfo.tag} ({tagInfo.count}) + + {/each} +
+ {:else} + - + {/if} +
+
+ +
+
+ {/if} +
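Most of LeaderboardModal.svelte's script is lost in this extract; what survives shows the selected model's name plus a `topTags` list rendered as `tag (count)`. A hedged sketch of how such a tally could be built from the model's feedback entries (the field names and the limit are assumptions, since the real aggregation code is not visible here):

```ts
// Hypothetical aggregation for the "top tags" badges shown by the modal.
// A Feedback-like record with data.model_id and data.tags is assumed here;
// the actual LeaderboardModal.svelte may derive topTags differently.
type TagInfo = { tag: string; count: number };
type FeedbackLike = { data: { model_id: string; tags?: string[] } };

function topTagsFor(modelId: string, feedbacks: FeedbackLike[], limit = 5): TagInfo[] {
	const counts = new Map<string, number>();
	for (const feedback of feedbacks) {
		if (feedback.data.model_id !== modelId) continue;
		for (const tag of feedback.data.tags ?? []) {
			counts.set(tag, (counts.get(tag) ?? 0) + 1);
		}
	}
	return [...counts.entries()]
		.map(([tag, count]) => ({ tag, count }))
		.sort((a, b) => b.count - a.count)
		.slice(0, limit);
}
```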
diff --git a/src/lib/components/admin/Settings/Audio.svelte b/src/lib/components/admin/Settings/Audio.svelte
index 960f3497ac..da3912a514 100644
--- a/src/lib/components/admin/Settings/Audio.svelte
+++ b/src/lib/components/admin/Settings/Audio.svelte
@@ -39,6 +39,7 @@
 	let STT_OPENAI_API_KEY = '';
 	let STT_ENGINE = '';
 	let STT_MODEL = '';
+	let STT_SUPPORTED_CONTENT_TYPES = '';
 	let STT_WHISPER_MODEL = '';
 	let STT_AZURE_API_KEY = '';
 	let STT_AZURE_REGION = '';
@@ -114,6 +115,7 @@
 			OPENAI_API_KEY: STT_OPENAI_API_KEY,
 			ENGINE: STT_ENGINE,
 			MODEL: STT_MODEL,
+			SUPPORTED_CONTENT_TYPES: STT_SUPPORTED_CONTENT_TYPES.split(','),
 			WHISPER_MODEL: STT_WHISPER_MODEL,
 			DEEPGRAM_API_KEY: STT_DEEPGRAM_API_KEY,
 			AZURE_API_KEY: STT_AZURE_API_KEY,
@@ -160,6 +162,7 @@
 		STT_ENGINE = res.stt.ENGINE;
 		STT_MODEL = res.stt.MODEL;
+		STT_SUPPORTED_CONTENT_TYPES = (res?.stt?.SUPPORTED_CONTENT_TYPES ?? []).join(',');
 		STT_WHISPER_MODEL = res.stt.WHISPER_MODEL;
 		STT_AZURE_API_KEY = res.stt.AZURE_API_KEY;
 		STT_AZURE_REGION = res.stt.AZURE_REGION;
@@ -184,9 +187,26 @@
-
{$i18n.t('STT Settings')}
+
{$i18n.t('Speech-to-Text')}
-
+
+ + {#if STT_ENGINE !== 'web'} +
+
{$i18n.t('Supported MIME Types')}
+
+
+ +
+
+
+ {/if} + +
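The 'Supported MIME Types' field above keeps the list as one comma-separated string in the admin UI: it is `split(',')` into an array when the config is saved and `join(',')`-ed back when it is loaded. A small sketch of that round trip; the trim/filter steps are extra hardening added for illustration and are not part of the diff:

```ts
// UI string <-> config array round trip for SUPPORTED_CONTENT_TYPES.
// split/join mirror the Audio.svelte change; trim/filter are illustrative only.
function contentTypesToConfig(value: string): string[] {
	return value
		.split(',')
		.map((type) => type.trim())
		.filter((type) => type.length > 0);
}

function contentTypesFromConfig(types: string[] | null | undefined): string {
	return (types ?? []).join(',');
}

// contentTypesToConfig('audio/wav, audio/mpeg') -> ['audio/wav', 'audio/mpeg']
// contentTypesFromConfig(['audio/wav', 'audio/mpeg']) -> 'audio/wav,audio/mpeg'
```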
{$i18n.t('Speech-to-Text Engine')}
-
{$i18n.t('STT Model')}
+
{$i18n.t('STT Model')}
-
{$i18n.t('Azure Region')}
+
{$i18n.t('Azure Region')}
-
{$i18n.t('Language Locales')}
+
{$i18n.t('Language Locales')}
-
{$i18n.t('Endpoint URL')}
+
{$i18n.t('Endpoint URL')}
-
{$i18n.t('Max Speakers')}
+
{$i18n.t('Max Speakers')}
{:else if STT_ENGINE === ''}
-
{$i18n.t('STT Model')}
+
{$i18n.t('STT Model')}
@@ -416,12 +436,12 @@ {/if}
-
-
-
{$i18n.t('TTS Settings')}
+
{$i18n.t('Text-to-Speech')}
-
+
+ +
{$i18n.t('Text-to-Speech Engine')}
-
{$i18n.t('Endpoint URL')}
+
{$i18n.t('Endpoint URL')}
{/if} -
- - {#if TTS_ENGINE === ''} -
-
{$i18n.t('TTS Voice')}
-
-
- -
-
-
- {:else if TTS_ENGINE === 'transformers'} -
-
{$i18n.t('TTS Model')}
-
-
- - - - -
-
-
- {$i18n.t(`Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.`)} - - To learn more about SpeechT5, - - - {$i18n.t(`click here`, { - name: 'SpeechT5' - })}. - - To see the available CMU Arctic speaker embeddings, - - {$i18n.t(`click here`)}. - -
-
- {:else if TTS_ENGINE === 'openai'} -
-
-
{$i18n.t('TTS Voice')}
+
+ {#if TTS_ENGINE === ''} +
+
{$i18n.t('TTS Voice')}
- - - + > + {#each voices as voice} - + {/each} - +
-
-
{$i18n.t('TTS Model')}
+ {:else if TTS_ENGINE === 'transformers'} +
+
{$i18n.t('TTS Model')}
- - {#each models as model} -
-
-
- {:else if TTS_ENGINE === 'elevenlabs'} -
-
-
{$i18n.t('TTS Voice')}
-
-
- +
+ {$i18n.t(`Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.`)} - - {#each voices as voice} - - {/each} - -
-
-
-
-
{$i18n.t('TTS Model')}
-
-
- + To learn more about SpeechT5, - - {#each models as model} - -
-
-
-
- {:else if TTS_ENGINE === 'azure'} -
-
-
{$i18n.t('TTS Voice')}
-
-
- - - - {#each voices as voice} - - {/each} - -
-
-
-
-
- {$i18n.t('Output format')} - {$i18n.t('Available list')} + {$i18n.t(`click here`, { + name: 'SpeechT5' + })}. + + To see the available CMU Arctic speaker embeddings, + + {$i18n.t(`click here`)}.
-
-
- +
+ {:else if TTS_ENGINE === 'openai'} +
+
+
{$i18n.t('TTS Voice')}
+
+
+ + + + {#each voices as voice} + + {/each} + +
+
+
+
+
{$i18n.t('TTS Model')}
+
+
+ + + + {#each models as model} + +
-
- {/if} + {:else if TTS_ENGINE === 'elevenlabs'} +
+
+
{$i18n.t('TTS Voice')}
+
+
+ -
+ + {#each voices as voice} + + {/each} + +
+
+
+
+
{$i18n.t('TTS Model')}
+
+
+ + + + {#each models as model} + +
+
+
+
+ {:else if TTS_ENGINE === 'azure'} +
+
+
{$i18n.t('TTS Voice')}
+
+
+ + + + {#each voices as voice} + + {/each} + +
+
+
+
+
+ {$i18n.t('Output format')} + + {$i18n.t('Available list')} + +
+
+
+ +
+
+
+
+ {/if} +
{$i18n.t('Response splitting')}
diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte
index 738cefc335..e05abf686c 100644
--- a/src/lib/components/admin/Settings/Documents.svelte
+++ b/src/lib/components/admin/Settings/Documents.svelte
@@ -1144,6 +1144,50 @@
+ +
+
{$i18n.t('Image Compression Width')}
+
+ + + +
+
+ +
+
+ {$i18n.t('Image Compression Height')} +
+
+ + + +
+
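The two inputs above expose the new global Image Compression Width/Height, and the upload handlers later in this diff (EditUserModal and MessageInput) clamp the user's own compression size against these values. A hedged sketch of the effective-dimension rule; note that this helper also falls back to the admin value when the user has not set one, a case the inline `>` comparisons in the diff do not obviously cover:

```ts
// Illustrative only: combine a user-chosen compression dimension with the
// admin-configured maximum. null means "no constraint" on that axis.
function effectiveDimension(userValue: number | null, adminMax: number | null): number | null {
	if (userValue !== null && adminMax !== null) return Math.min(userValue, adminMax);
	return userValue ?? adminMax;
}

// e.g. the width handed to compressImage():
// effectiveDimension($settings?.imageCompressionSize?.width ?? null,
//                    $config?.file?.image_compression?.width ?? null)
```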
+
diff --git a/src/lib/components/admin/Users/Groups.svelte b/src/lib/components/admin/Users/Groups.svelte
index 70e5832ea6..338fbbd4cb 100644
--- a/src/lib/components/admin/Users/Groups.svelte
+++ b/src/lib/components/admin/Users/Groups.svelte
@@ -65,6 +65,7 @@
 		},
 		chat: {
 			controls: true,
+			system_prompt: true,
 			file_upload: true,
 			delete: true,
 			edit: true,
diff --git a/src/lib/components/admin/Users/Groups/Permissions.svelte b/src/lib/components/admin/Users/Groups/Permissions.svelte
index 6af935813b..04e81a8076 100644
--- a/src/lib/components/admin/Users/Groups/Permissions.svelte
+++ b/src/lib/components/admin/Users/Groups/Permissions.svelte
@@ -263,6 +263,14 @@
+
+
+ {$i18n.t('Allow Chat System Prompt')} +
+ + +
+
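Together with the `system_prompt: true` default added in Groups.svelte above, this switch gives admins a dedicated 'Allow Chat System Prompt' permission, and Controls.svelte further down gates the system-prompt textarea on it. The gate follows the same shape as the other chat permissions; a minimal sketch:

```ts
// Same gate shape as the Controls.svelte change later in this diff:
// admins always pass, and a missing flag defaults to allowed (?? true).
type ChatPermissions = { controls?: boolean; system_prompt?: boolean };
type SessionUser = { role: string; permissions?: { chat?: ChatPermissions } };

const canEditSystemPrompt = (user?: SessionUser): boolean =>
	user?.role === 'admin' || (user?.permissions?.chat?.system_prompt ?? true);
```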
 {$i18n.t('Allow Chat Delete')}
diff --git a/src/lib/components/admin/Users/UserList/EditUserModal.svelte b/src/lib/components/admin/Users/UserList/EditUserModal.svelte
index 91946e3eed..929b6d5070 100644
--- a/src/lib/components/admin/Users/UserList/EditUserModal.svelte
+++ b/src/lib/components/admin/Users/UserList/EditUserModal.svelte
@@ -101,7 +101,7 @@
@@ -134,7 +133,7 @@
{ let imageUrl = event.target.result; - if ($settings?.imageCompression ?? false) { - const width = $settings?.imageCompressionSize?.width ?? null; - const height = $settings?.imageCompressionSize?.height ?? null; + if ( + ($settings?.imageCompression ?? false) || + ($config?.file?.image_compression?.width ?? null) || + ($config?.file?.image_compression?.height ?? null) + ) { + let width = null; + let height = null; + + if ($settings?.imageCompression ?? false) { + width = $settings?.imageCompressionSize?.width ?? null; + height = $settings?.imageCompressionSize?.height ?? null; + } + + if ( + ($config?.file?.image_compression?.width ?? null) || + ($config?.file?.image_compression?.height ?? null) + ) { + if (width > ($config?.file?.image_compression?.width ?? null)) { + width = $config?.file?.image_compression?.width ?? null; + } + if (height > ($config?.file?.image_compression?.height ?? null)) { + height = $config?.file?.image_compression?.height ?? null; + } + } if (width || height) { imageUrl = await compressImage(imageUrl, width, height); diff --git a/src/lib/components/channel/Messages/Message/ProfilePreview.svelte b/src/lib/components/channel/Messages/Message/ProfilePreview.svelte index a9db05b1e8..a0f3e1c322 100644 --- a/src/lib/components/channel/Messages/Message/ProfilePreview.svelte +++ b/src/lib/components/channel/Messages/Message/ProfilePreview.svelte @@ -1,10 +1,9 @@ { - dispatch('change', state); - }} + onOpenChange={(state) => {}} typeahead={false} > @@ -52,7 +65,7 @@
- {#if $activeUserIds.includes(user.id)} + {#if active}
{ loading = true; console.log('mounted'); window.addEventListener('message', onMessageHandler); $socket?.on('chat-events', chatEventHandler); - page.subscribe((page) => { - if (page.url.pathname === '/') { + pageSubscribe = page.subscribe(async (p) => { + if (p.url.pathname === '/') { + await tick(); initNewChat(); } }); - if (!$chatId) { - chatIdUnsubscriber = chatId.subscribe(async (value) => { - if (!value) { - await tick(); // Wait for DOM updates - await initNewChat(); - } - }); - } else { - if ($temporaryChatEnabled) { - await goto('/'); - } - } - if (localStorage.getItem(`chat-input${chatIdProp ? `-${chatIdProp}` : ''}`)) { prompt = ''; files = []; @@ -515,6 +504,7 @@ }); onDestroy(() => { + pageSubscribe(); chatIdUnsubscriber?.(); window.removeEventListener('message', onMessageHandler); $socket?.off('chat-events', chatEventHandler); @@ -805,6 +795,11 @@ `https://www.youtube.com/watch?v=${$page.url.searchParams.get('youtube')}` ); } + + if ($page.url.searchParams.get('load-url')) { + await uploadWeb($page.url.searchParams.get('load-url')); + } + if ($page.url.searchParams.get('web-search') === 'true') { webSearchEnabled = true; } @@ -813,6 +808,10 @@ imageGenerationEnabled = true; } + if ($page.url.searchParams.get('code-interpreter') === 'true') { + codeInterpreterEnabled = true; + } + if ($page.url.searchParams.get('tools')) { selectedToolIds = $page.url.searchParams .get('tools') @@ -859,6 +858,11 @@ const loadChat = async () => { chatId.set(chatIdProp); + + if ($temporaryChatEnabled) { + temporaryChatEnabled.set(false); + } + chat = await getChatById(localStorage.token, $chatId).catch(async (error) => { await goto('/'); return null; @@ -878,6 +882,11 @@ (chatContent?.models ?? undefined) !== undefined ? chatContent.models : [chatContent.models ?? '']; + + if (!($user?.role === 'admin' || ($user?.permissions?.chat?.multiple_models ?? true))) { + selectedModels = selectedModels.length > 0 ? [selectedModels[0]] : ['']; + } + oldSelectedModelIds = selectedModels; history = @@ -1725,6 +1734,7 @@ history.messages[responseMessageId] = responseMessage; history.currentId = responseMessageId; + return null; }); @@ -1821,7 +1831,8 @@ childrenIds: [], role: 'user', content: userPrompt, - models: selectedModels + models: selectedModels, + timestamp: Math.floor(Date.now() / 1000) // Unix epoch }; if (parentId !== null) { @@ -2107,13 +2118,15 @@ {stopResponse} {createMessagePair} onChange={(input) => { - if (input.prompt !== null) { - localStorage.setItem( - `chat-input${$chatId ? `-${$chatId}` : ''}`, - JSON.stringify(input) - ); - } else { - localStorage.removeItem(`chat-input${$chatId ? `-${$chatId}` : ''}`); + if (!$temporaryChatEnabled) { + if (input.prompt !== null) { + localStorage.setItem( + `chat-input${$chatId ? `-${$chatId}` : ''}`, + JSON.stringify(input) + ); + } else { + localStorage.removeItem(`chat-input${$chatId ? `-${$chatId}` : ''}`); + } } }} on:upload={async (e) => { diff --git a/src/lib/components/chat/Controls/Controls.svelte b/src/lib/components/chat/Controls/Controls.svelte index d25d89a454..915d36f08d 100644 --- a/src/lib/components/chat/Controls/Controls.svelte +++ b/src/lib/components/chat/Controls/Controls.svelte @@ -67,7 +67,7 @@
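Earlier in the hunk above, Chat.svelte gains two more URL-driven switches next to the existing `web-search` and image-generation ones: `?load-url=` feeds a page straight into `uploadWeb()`, and `?code-interpreter=true` pre-enables the code interpreter. Condensed into one place (a restatement of the diff, with declarations added only so the sketch stands alone):

```ts
// Condensed view of the query-parameter handling in Chat.svelte.
// uploadWeb and the *Enabled flags are the component's own bindings; they are
// declared here only to keep the sketch self-contained.
declare function uploadWeb(url: string): Promise<void>;
let webSearchEnabled = false;
let codeInterpreterEnabled = false;

async function applyQueryParams(params: URLSearchParams): Promise<void> {
	const loadUrl = params.get('load-url');
	if (loadUrl) {
		await uploadWeb(loadUrl);
	}
	if (params.get('web-search') === 'true') {
		webSearchEnabled = true;
	}
	if (params.get('code-interpreter') === 'true') {
		codeInterpreterEnabled = true;
	}
}

// In the component this effectively runs against $page.url.searchParams during onMount.
```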
- {#if $user?.role === 'admin' || $user?.permissions.chat?.controls} + {#if $user?.role === 'admin' || ($user?.permissions.chat?.system_prompt ?? true)}
@@ -80,7 +80,9 @@ />
+ {/if} + {#if $user?.role === 'admin' || ($user?.permissions.chat?.controls ?? true)}
diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index af7066dc14..f384c5b8f5 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -91,7 +91,15 @@ $: onChange({ prompt, - files: files.filter((file) => file.type !== 'image'), + files: files + .filter((file) => file.type !== 'image') + .map((file) => { + return { + ...file, + user: undefined, + access_control: undefined + }; + }), selectedToolIds, selectedFilterIds, imageGenerationEnabled, @@ -299,6 +307,19 @@ const inputFilesHandler = async (inputFiles) => { console.log('Input files handler called with:', inputFiles); + + if ( + ($config?.file?.max_count ?? null) !== null && + files.length + inputFiles.length > $config?.file?.max_count + ) { + toast.error( + $i18n.t(`You can only chat with a maximum of {{maxCount}} file(s) at a time.`, { + maxCount: $config?.file?.max_count + }) + ); + return; + } + inputFiles.forEach((file) => { console.log('Processing file:', { name: file.name, @@ -334,9 +355,30 @@ reader.onload = async (event) => { let imageUrl = event.target.result; - if ($settings?.imageCompression ?? false) { - const width = $settings?.imageCompressionSize?.width ?? null; - const height = $settings?.imageCompressionSize?.height ?? null; + if ( + ($settings?.imageCompression ?? false) || + ($config?.file?.image_compression?.width ?? null) || + ($config?.file?.image_compression?.height ?? null) + ) { + let width = null; + let height = null; + + if ($settings?.imageCompression ?? false) { + width = $settings?.imageCompressionSize?.width ?? null; + height = $settings?.imageCompressionSize?.height ?? null; + } + + if ( + ($config?.file?.image_compression?.width ?? null) || + ($config?.file?.image_compression?.height ?? null) + ) { + if (width > ($config?.file?.image_compression?.width ?? null)) { + width = $config?.file?.image_compression?.width ?? null; + } + if (height > ($config?.file?.image_compression?.height ?? null)) { + height = $config?.file?.image_compression?.height ?? null; + } + } if (width || height) { imageUrl = await compressImage(imageUrl, width, height); diff --git a/src/lib/components/chat/Messages.svelte b/src/lib/components/chat/Messages.svelte index 22d6b95d0a..3078f5e81b 100644 --- a/src/lib/components/chat/Messages.svelte +++ b/src/lib/components/chat/Messages.svelte @@ -256,6 +256,10 @@ }; const editMessage = async (messageId, { content, files }, submit = true) => { + if ((selectedModels ?? []).filter((id) => id).length === 0) { + toast.error($i18n.t('Model not selected')); + return; + } if (history.messages[messageId].role === 'user') { if (submit) { // New user message diff --git a/src/lib/components/chat/Messages/Citations.svelte b/src/lib/components/chat/Messages/Citations.svelte index 7177f27f25..7d81d64327 100644 --- a/src/lib/components/chat/Messages/Citations.svelte +++ b/src/lib/components/chat/Messages/Citations.svelte @@ -188,9 +188,8 @@
- {#each citations as citation, idx} + {#each citations.slice(2) as citation, idx} +
{#if idx < followUps.length - 1}
diff --git a/src/lib/components/chat/Messages/UserMessage.svelte b/src/lib/components/chat/Messages/UserMessage.svelte index 4a61b7190d..a9960eba30 100644 --- a/src/lib/components/chat/Messages/UserMessage.svelte +++ b/src/lib/components/chat/Messages/UserMessage.svelte @@ -107,7 +107,11 @@ }} /> -
+
{#if !($settings?.chatBubble ?? true)}
m.id === message.user)?.info?.meta?.profile_image_url ?? '/user.png') : (user?.profile_image_url ?? '/user.png')} - className={'size-8'} + className={'size-8 user-message-profile-image'} />
{/if} @@ -143,6 +147,16 @@ {/if}
+ {:else if message.timestamp} +
+ +
{/if}
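UserMessage.svelte above now falls back to showing `message.timestamp` (stored as a Unix epoch in seconds when the message is created) in chat-bubble mode. The formatting markup itself is stripped in this extract; one dependency-free way to turn that epoch into hover text, shown purely as a sketch:

```ts
// message.timestamp is written as Math.floor(Date.now() / 1000), i.e. seconds.
// How UserMessage.svelte actually formats it is not visible here; this is one
// plain, locale-aware option with no extra dependency.
function formatMessageTimestamp(epochSeconds: number): string {
	return new Date(epochSeconds * 1000).toLocaleString();
}

// e.g. title={formatMessageTimestamp(message.timestamp)} on the hovered element
```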
diff --git a/src/lib/components/chat/ModelSelector/Selector.svelte b/src/lib/components/chat/ModelSelector/Selector.svelte index 7c1251106b..d586553ac2 100644 --- a/src/lib/components/chat/ModelSelector/Selector.svelte +++ b/src/lib/components/chat/ModelSelector/Selector.svelte @@ -425,7 +425,7 @@ class="flex gap-1 w-fit text-center text-sm font-medium rounded-full bg-transparent px-1.5 pb-0.5" bind:this={tagsContainerElement} > - {#if (items.find((item) => item.model?.connection_type === 'local') && items.find((item) => item.model?.connection_type === 'external')) || items.find((item) => item.model?.direct) || tags.length > 0} + {#if items.find((item) => item.model?.connection_type === 'local') || items.find((item) => item.model?.connection_type === 'external') || items.find((item) => item.model?.direct) || tags.length > 0} + {/if} + + {#if items.find((item) => item.model?.connection_type === 'external')} + + {#if !$mobile} + + + + {/if}
- - - - {#if $user !== undefined && $user !== null}
+ {#if $temporaryChatEnabled && $chatId === 'local'} +
+
{$i18n.t('Temporary Chat')}
+
+ {/if} + {#if !history.currentId && !$chatId && ($banners.length > 0 || ($config?.license_metadata?.type ?? null) === 'trial' || (($config?.license_metadata?.seats ?? null) !== null && $config?.user_count > $config?.license_metadata?.seats))}
diff --git a/src/lib/components/chat/Placeholder.svelte b/src/lib/components/chat/Placeholder.svelte index 84135a1958..9a13dba953 100644 --- a/src/lib/components/chat/Placeholder.svelte +++ b/src/lib/components/chat/Placeholder.svelte @@ -93,7 +93,7 @@
{#if $temporaryChatEnabled} @@ -107,7 +107,7 @@ class="w-full text-3xl text-gray-800 dark:text-gray-100 text-center flex items-center gap-4 font-primary" >
-
+
{#each models as model, modelIdx} @@ -138,9 +138,20 @@
-
+
{#if models[selectedModelIdx]?.name} - {models[selectedModelIdx]?.name} + + + {models[selectedModelIdx]?.name} + + {:else} {$i18n.t('Hello, {{name}}', { name: $user?.name })} {/if} @@ -205,10 +216,12 @@ {createMessagePair} placeholder={$i18n.t('How can I help you today?')} onChange={(input) => { - if (input.prompt !== null) { - localStorage.setItem(`chat-input`, JSON.stringify(input)); - } else { - localStorage.removeItem(`chat-input`); + if (!$temporaryChatEnabled) { + if (input.prompt !== null) { + localStorage.setItem(`chat-input`, JSON.stringify(input)); + } else { + localStorage.removeItem(`chat-input`); + } } }} on:upload={(e) => { diff --git a/src/lib/components/chat/Settings/About.svelte b/src/lib/components/chat/Settings/About.svelte index 56cf484b79..d4c3723077 100644 --- a/src/lib/components/chat/Settings/About.svelte +++ b/src/lib/components/chat/Settings/About.svelte @@ -42,7 +42,7 @@ }); -
+
diff --git a/src/lib/components/chat/Settings/Account.svelte b/src/lib/components/chat/Settings/Account.svelte index 20866af3c2..ab87b8d1a7 100644 --- a/src/lib/components/chat/Settings/Account.svelte +++ b/src/lib/components/chat/Settings/Account.svelte @@ -86,7 +86,7 @@ }); -
+
{ saveSettings({ diff --git a/src/lib/components/chat/Settings/Chats.svelte b/src/lib/components/chat/Settings/Chats.svelte index 7ef0d7a568..f11897d6e9 100644 --- a/src/lib/components/chat/Settings/Chats.svelte +++ b/src/lib/components/chat/Settings/Chats.svelte @@ -107,7 +107,7 @@ -
+
{ updateHandler(); @@ -126,7 +127,11 @@
-
+
{$i18n.t('Connect to your own OpenAI compatible API endpoints.')}
{$i18n.t( diff --git a/src/lib/components/chat/Settings/Connections/Connection.svelte b/src/lib/components/chat/Settings/Connections/Connection.svelte index b81ce4d157..ea9089f2f1 100644 --- a/src/lib/components/chat/Settings/Connections/Connection.svelte +++ b/src/lib/components/chat/Settings/Connections/Connection.svelte @@ -2,6 +2,7 @@ import { getContext, tick } from 'svelte'; const i18n = getContext('i18n'); + import { settings } from '$lib/stores'; import Tooltip from '$lib/components/common/Tooltip.svelte'; import SensitiveInput from '$lib/components/common/SensitiveInput.svelte'; import Cog6 from '$lib/components/icons/Cog6.svelte'; @@ -65,7 +66,7 @@
@@ -83,6 +84,7 @@
-
+
{$i18n.t('Allow User Location')}
@@ -99,10 +107,10 @@ {#if role === 'admin'} - { + goto('/playground'); show = false; if ($mobile) { @@ -111,28 +119,15 @@ }} >
- - - +
{$i18n.t('Playground')}
-
+ - { + goto('/admin'); show = false; if ($mobile) { @@ -141,23 +136,10 @@ }} >
- - - +
{$i18n.t('Admin Panel')}
-
+ {/if} {#if help} @@ -165,10 +147,11 @@ { window.open('https://docs.openwebui.com', '_blank'); + show = false; }} > @@ -177,10 +160,11 @@ { window.open('https://github.com/open-webui/open-webui/releases', '_blank'); + show = false; }} > @@ -188,7 +172,7 @@ { showShortcuts = !showShortcuts; @@ -214,55 +198,46 @@ }} >
- - - - +
{$i18n.t('Sign Out')}
- {#if $activeUserIds?.length > 0} -
+ {#if usage} + {#if usage?.user_ids?.length > 0} +
- 0 - ? `${$i18n.t('Running')}: ${$USAGE_POOL.join(', ')} ✨` - : ''} - > -
-
- - - - -
+ 0 + ? `${$i18n.t('Running')}: ${usage.model_ids.join(', ')} ✨` + : ''} + > +
{ + getUsageInfo(); + }} + > +
+ + + + +
-
- - {$i18n.t('Active Users')}: - - - {$activeUserIds?.length} - +
+ + {$i18n.t('Active Users')}: + + + {usage?.user_ids?.length} + +
-
-
+ + {/if} {/if}
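The user-menu hunk above swaps the `$activeUserIds` / `$USAGE_POOL` stores for a local `usage` object with `user_ids` and `model_ids`, refreshed through `getUsageInfo()` when the indicator is clicked. The fetch itself is not part of this hunk; a hedged sketch of the shape the markup consumes (the endpoint below is an assumption, not taken from the diff):

```ts
// Shape read by the menu above: usage?.user_ids?.length drives the "Active
// Users" count and usage.model_ids feeds the "Running: ..." tooltip.
type UsageInfo = { user_ids: string[]; model_ids: string[] };

let usage: UsageInfo | null = null;

// Hypothetical refresher: the real getUsageInfo() and its endpoint are not
// shown in this diff, so treat the URL as a placeholder.
async function getUsageInfo(): Promise<void> {
	const res = await fetch('/api/usage', {
		headers: { Authorization: `Bearer ${localStorage.token}` }
	});
	usage = res.ok ? await res.json() : null;
}
```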