diff --git a/CHANGELOG.md b/CHANGELOG.md index bad83dc1ef..126f14e006 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,34 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.5.11] - 2025-02-13 + +### Added + +- **🎤 Kokoro-JS TTS Support**: A new on-device, high-quality text-to-speech engine has been integrated, vastly improving voice generation quality—everything runs directly in your browser. +- **🐍 Jupyter Notebook Support in Code Interpreter**: Now, you can configure Code Interpreter to run Python code not only via Pyodide but also through Jupyter, offering a more robust coding environment for AI-driven computations and analysis. +- **🔗 Direct API Connections for Private & Local Inference**: You can now connect Open WebUI to your private or localhost API inference endpoints. CORS must be enabled, but this unlocks direct, on-device AI infrastructure support. +- **🔍 Advanced Domain Filtering for Web Search**: You can now specify which domains should be included or excluded from web searches, refining results for more relevant information retrieval. +- **🚀 Improved Image Generation Metadata Handling**: Generated images now retain metadata for better organization and future retrieval. +- **📂 S3 Key Prefix Support**: Fine-grained control over S3 storage file structuring with configurable key prefixes. +- **📸 Support for Image-Only Messages**: Send messages containing only images, facilitating more visual-centric interactions. +- **🌍 Updated Translations**: German, Spanish, Traditional Chinese, and Catalan translations updated for better multilingual support. + +### Fixed + +- **🔧 OAuth Debug Logs & Username Claim Fixes**: Debug logs have been added for OAuth role and group management, with fixes ensuring proper OAuth username retrieval and claim handling. 
+- **📌 Citations Formatting & Toggle Fixes**: Inline citation toggles now function correctly, and citations with more than three sources are now fully visible when expanded. +- **📸 ComfyUI Maximum Seed Value Constraint Fixed**: The maximum allowed seed value for ComfyUI has been corrected, preventing unintended behavior. +- **🔑 Connection Settings Stability**: Addressed connection settings issues that were causing instability when saving configurations. +- **📂 GGUF Model Upload Stability**: Fixed upload inconsistencies for GGUF models, ensuring reliable local model handling. +- **🔧 Web Search Configuration Bug**: Fixed issues where web search filters and settings weren't correctly applied. +- **💾 User Settings Persistence Fix**: Ensured user-specific settings are correctly saved and applied across sessions. +- **🔄 OpenID Username Retrieval Enhancement**: Usernames are now correctly picked up and assigned for OpenID Connect (OIDC) logins. + +### Changed + +- **🔗 Improved Direct Connections Integration**: Simplified the configuration process for setting up direct API connections, making it easier to integrate custom inference endpoints. + ## [0.5.10] - 2025-02-05 ### Fixed diff --git a/README.md b/README.md index 0fb03537df..56ab09b05d 100644 --- a/README.md +++ b/README.md @@ -174,7 +174,7 @@ docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/wa In the last part of the command, replace `open-webui` with your container name if it is different. -Check our Migration Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/tutorials/migration/). +Check our Updating Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/updating). 
### Using the Dev Branch 🌙 diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index bf6f1d0256..ff298dc5b9 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -660,6 +660,7 @@ S3_ACCESS_KEY_ID = os.environ.get("S3_ACCESS_KEY_ID", None) S3_SECRET_ACCESS_KEY = os.environ.get("S3_SECRET_ACCESS_KEY", None) S3_REGION_NAME = os.environ.get("S3_REGION_NAME", None) S3_BUCKET_NAME = os.environ.get("S3_BUCKET_NAME", None) +S3_KEY_PREFIX = os.environ.get("S3_KEY_PREFIX", None) S3_ENDPOINT_URL = os.environ.get("S3_ENDPOINT_URL", None) GCS_BUCKET_NAME = os.environ.get("GCS_BUCKET_NAME", None) @@ -682,6 +683,17 @@ Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True) CACHE_DIR = f"{DATA_DIR}/cache" Path(CACHE_DIR).mkdir(parents=True, exist_ok=True) + +#################################### +# DIRECT CONNECTIONS +#################################### + +ENABLE_DIRECT_CONNECTIONS = PersistentConfig( + "ENABLE_DIRECT_CONNECTIONS", + "direct.enable", + os.environ.get("ENABLE_DIRECT_CONNECTIONS", "True").lower() == "true", +) + #################################### # OLLAMA_BASE_URL #################################### @@ -1325,6 +1337,54 @@ Your task is to synthesize these responses into a single, high-quality response. 
Responses from models: {{responses}}""" +#################################### +# Code Interpreter +#################################### + +ENABLE_CODE_INTERPRETER = PersistentConfig( + "ENABLE_CODE_INTERPRETER", + "code_interpreter.enable", + os.environ.get("ENABLE_CODE_INTERPRETER", "True").lower() == "true", +) + +CODE_INTERPRETER_ENGINE = PersistentConfig( + "CODE_INTERPRETER_ENGINE", + "code_interpreter.engine", + os.environ.get("CODE_INTERPRETER_ENGINE", "pyodide"), +) + +CODE_INTERPRETER_PROMPT_TEMPLATE = PersistentConfig( + "CODE_INTERPRETER_PROMPT_TEMPLATE", + "code_interpreter.prompt_template", + os.environ.get("CODE_INTERPRETER_PROMPT_TEMPLATE", ""), +) + +CODE_INTERPRETER_JUPYTER_URL = PersistentConfig( + "CODE_INTERPRETER_JUPYTER_URL", + "code_interpreter.jupyter.url", + os.environ.get("CODE_INTERPRETER_JUPYTER_URL", ""), +) + +CODE_INTERPRETER_JUPYTER_AUTH = PersistentConfig( + "CODE_INTERPRETER_JUPYTER_AUTH", + "code_interpreter.jupyter.auth", + os.environ.get("CODE_INTERPRETER_JUPYTER_AUTH", ""), +) + +CODE_INTERPRETER_JUPYTER_AUTH_TOKEN = PersistentConfig( + "CODE_INTERPRETER_JUPYTER_AUTH_TOKEN", + "code_interpreter.jupyter.auth_token", + os.environ.get("CODE_INTERPRETER_JUPYTER_AUTH_TOKEN", ""), +) + + +CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD = PersistentConfig( + "CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD", + "code_interpreter.jupyter.auth_password", + os.environ.get("CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD", ""), +) + + DEFAULT_CODE_INTERPRETER_PROMPT = """ #### Tools Available @@ -1335,9 +1395,8 @@ DEFAULT_CODE_INTERPRETER_PROMPT = """ - When coding, **always aim to print meaningful outputs** (e.g., results, tables, summaries, or visuals) to better interpret and verify the findings. Avoid relying on implicit outputs; prioritize explicit and clear print statements so the results are effectively communicated to the user. 
- After obtaining the printed output, **always provide a concise analysis, interpretation, or next steps to help the user understand the findings or refine the outcome further.** - If the results are unclear, unexpected, or require validation, refine the code and execute it again as needed. Always aim to deliver meaningful insights from the results, iterating if necessary. - - If a link is provided for an image, audio, or any file, include it in the response exactly as given to ensure the user has access to the original resource. + - **If a link to an image, audio, or any file is provided in markdown format in the output, ALWAYS regurgitate word for word, explicitly display it as part of the response to ensure the user can access it easily, do NOT change the link.** - All responses should be communicated in the chat's primary language, ensuring seamless understanding. If the chat is multilingual, default to English for clarity. - - **If a link to an image, audio, or any file is provided in markdown format, ALWAYS regurgitate explicitly display it as part of the response to ensure the user can access it easily, do NOT change the link.** Ensure that the tools are effectively utilized to achieve the highest-quality analysis for the user.""" @@ -1645,7 +1704,7 @@ RAG_WEB_SEARCH_ENGINE = PersistentConfig( # This ensures the highest level of safety and reliability of the information sources. 
RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = PersistentConfig( "RAG_WEB_SEARCH_DOMAIN_FILTER_LIST", - "rag.rag.web.search.domain.filter_list", + "rag.web.search.domain.filter_list", [ # "wikipedia.com", # "wikimedia.org", @@ -1690,6 +1749,12 @@ MOJEEK_SEARCH_API_KEY = PersistentConfig( os.getenv("MOJEEK_SEARCH_API_KEY", ""), ) +BOCHA_SEARCH_API_KEY = PersistentConfig( + "BOCHA_SEARCH_API_KEY", + "rag.web.search.bocha_search_api_key", + os.getenv("BOCHA_SEARCH_API_KEY", ""), +) + SERPSTACK_API_KEY = PersistentConfig( "SERPSTACK_API_KEY", "rag.web.search.serpstack_api_key", @@ -2012,6 +2077,12 @@ WHISPER_MODEL_AUTO_UPDATE = ( and os.environ.get("WHISPER_MODEL_AUTO_UPDATE", "").lower() == "true" ) +# Add Deepgram configuration +DEEPGRAM_API_KEY = PersistentConfig( + "DEEPGRAM_API_KEY", + "audio.stt.deepgram.api_key", + os.getenv("DEEPGRAM_API_KEY", ""), +) AUDIO_STT_OPENAI_API_BASE_URL = PersistentConfig( "AUDIO_STT_OPENAI_API_BASE_URL", diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 00605e15dc..0be3887f82 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -92,6 +92,7 @@ log_sources = [ "RAG", "WEBHOOK", "SOCKET", + "OAUTH", ] SRC_LOG_LEVELS = {} diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 863f58dea5..88b5b3f692 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -97,6 +97,16 @@ from open_webui.config import ( OPENAI_API_BASE_URLS, OPENAI_API_KEYS, OPENAI_API_CONFIGS, + # Direct Connections + ENABLE_DIRECT_CONNECTIONS, + # Code Interpreter + ENABLE_CODE_INTERPRETER, + CODE_INTERPRETER_ENGINE, + CODE_INTERPRETER_PROMPT_TEMPLATE, + CODE_INTERPRETER_JUPYTER_URL, + CODE_INTERPRETER_JUPYTER_AUTH, + CODE_INTERPRETER_JUPYTER_AUTH_TOKEN, + CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD, # Image AUTOMATIC1111_API_AUTH, AUTOMATIC1111_BASE_URL, @@ -130,6 +140,7 @@ from open_webui.config import ( AUDIO_TTS_AZURE_SPEECH_REGION, AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT, WHISPER_MODEL, + 
DEEPGRAM_API_KEY, WHISPER_MODEL_AUTO_UPDATE, WHISPER_MODEL_DIR, # Retrieval @@ -180,6 +191,7 @@ from open_webui.config import ( EXA_API_KEY, KAGI_SEARCH_API_KEY, MOJEEK_SEARCH_API_KEY, + BOCHA_SEARCH_API_KEY, GOOGLE_PSE_API_KEY, GOOGLE_PSE_ENGINE_ID, GOOGLE_DRIVE_CLIENT_ID, @@ -322,7 +334,11 @@ class SPAStaticFiles(StaticFiles): return await super().get_response(path, scope) except (HTTPException, StarletteHTTPException) as ex: if ex.status_code == 404: - return await super().get_response("index.html", scope) + if path.endswith(".js"): + # Return 404 for javascript files + raise ex + else: + return await super().get_response("index.html", scope) else: raise ex @@ -389,6 +405,14 @@ app.state.config.OPENAI_API_CONFIGS = OPENAI_API_CONFIGS app.state.OPENAI_MODELS = {} +######################################## +# +# DIRECT CONNECTIONS +# +######################################## + +app.state.config.ENABLE_DIRECT_CONNECTIONS = ENABLE_DIRECT_CONNECTIONS + ######################################## # # WEBUI @@ -514,6 +538,7 @@ app.state.config.GOOGLE_PSE_ENGINE_ID = GOOGLE_PSE_ENGINE_ID app.state.config.BRAVE_SEARCH_API_KEY = BRAVE_SEARCH_API_KEY app.state.config.KAGI_SEARCH_API_KEY = KAGI_SEARCH_API_KEY app.state.config.MOJEEK_SEARCH_API_KEY = MOJEEK_SEARCH_API_KEY +app.state.config.BOCHA_SEARCH_API_KEY = BOCHA_SEARCH_API_KEY app.state.config.SERPSTACK_API_KEY = SERPSTACK_API_KEY app.state.config.SERPSTACK_HTTPS = SERPSTACK_HTTPS app.state.config.SERPER_API_KEY = SERPER_API_KEY @@ -569,6 +594,24 @@ app.state.EMBEDDING_FUNCTION = get_embedding_function( app.state.config.RAG_EMBEDDING_BATCH_SIZE, ) +######################################## +# +# CODE INTERPRETER +# +######################################## + +app.state.config.ENABLE_CODE_INTERPRETER = ENABLE_CODE_INTERPRETER +app.state.config.CODE_INTERPRETER_ENGINE = CODE_INTERPRETER_ENGINE +app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE = CODE_INTERPRETER_PROMPT_TEMPLATE + +app.state.config.CODE_INTERPRETER_JUPYTER_URL 
= CODE_INTERPRETER_JUPYTER_URL +app.state.config.CODE_INTERPRETER_JUPYTER_AUTH = CODE_INTERPRETER_JUPYTER_AUTH +app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN = ( + CODE_INTERPRETER_JUPYTER_AUTH_TOKEN +) +app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD = ( + CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD +) ######################################## # @@ -611,6 +654,7 @@ app.state.config.STT_ENGINE = AUDIO_STT_ENGINE app.state.config.STT_MODEL = AUDIO_STT_MODEL app.state.config.WHISPER_MODEL = WHISPER_MODEL +app.state.config.DEEPGRAM_API_KEY = DEEPGRAM_API_KEY app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY @@ -753,6 +797,7 @@ app.include_router(openai.router, prefix="/openai", tags=["openai"]) app.include_router(pipelines.router, prefix="/api/v1/pipelines", tags=["pipelines"]) app.include_router(tasks.router, prefix="/api/v1/tasks", tags=["tasks"]) app.include_router(images.router, prefix="/api/v1/images", tags=["images"]) + app.include_router(audio.router, prefix="/api/v1/audio", tags=["audio"]) app.include_router(retrieval.router, prefix="/api/v1/retrieval", tags=["retrieval"]) @@ -855,20 +900,30 @@ async def chat_completion( if not request.app.state.MODELS: await get_all_models(request) + model_item = form_data.pop("model_item", {}) tasks = form_data.pop("background_tasks", None) - try: - model_id = form_data.get("model", None) - if model_id not in request.app.state.MODELS: - raise Exception("Model not found") - model = request.app.state.MODELS[model_id] - model_info = Models.get_model_by_id(model_id) - # Check if user has access to the model - if not BYPASS_MODEL_ACCESS_CONTROL and user.role == "user": - try: - check_model_access(user, model) - except Exception as e: - raise e + try: + if not model_item.get("direct", False): + model_id = form_data.get("model", None) + if model_id not in request.app.state.MODELS: + raise Exception("Model not found") + + model = 
request.app.state.MODELS[model_id] + model_info = Models.get_model_by_id(model_id) + + # Check if user has access to the model + if not BYPASS_MODEL_ACCESS_CONTROL and user.role == "user": + try: + check_model_access(user, model) + except Exception as e: + raise e + else: + model = model_item + model_info = None + + request.state.direct = True + request.state.model = model metadata = { "user_id": user.id, @@ -880,6 +935,7 @@ async def chat_completion( "features": form_data.get("features", None), "variables": form_data.get("variables", None), "model": model_info, + "direct": model_item.get("direct", False), **( {"function_calling": "native"} if form_data.get("params", {}).get("function_calling") == "native" @@ -891,6 +947,8 @@ async def chat_completion( else {} ), } + + request.state.metadata = metadata form_data["metadata"] = metadata form_data, metadata, events = await process_chat_payload( @@ -898,6 +956,7 @@ async def chat_completion( ) except Exception as e: + log.debug(f"Error processing chat payload: {e}") raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e), @@ -926,6 +985,12 @@ async def chat_completed( request: Request, form_data: dict, user=Depends(get_verified_user) ): try: + model_item = form_data.pop("model_item", {}) + + if model_item.get("direct", False): + request.state.direct = True + request.state.model = model_item + return await chat_completed_handler(request, form_data, user) except Exception as e: raise HTTPException( @@ -939,6 +1004,12 @@ async def chat_action( request: Request, action_id: str, form_data: dict, user=Depends(get_verified_user) ): try: + model_item = form_data.pop("model_item", {}) + + if model_item.get("direct", False): + request.state.direct = True + request.state.model = model_item + return await chat_action_handler(request, action_id, form_data, user) except Exception as e: raise HTTPException( @@ -1011,14 +1082,17 @@ async def get_app_config(request: Request): "enable_websocket": 
ENABLE_WEBSOCKET_SUPPORT, **( { + "enable_direct_connections": app.state.config.ENABLE_DIRECT_CONNECTIONS, "enable_channels": app.state.config.ENABLE_CHANNELS, "enable_web_search": app.state.config.ENABLE_RAG_WEB_SEARCH, - "enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, + "enable_code_interpreter": app.state.config.ENABLE_CODE_INTERPRETER, "enable_image_generation": app.state.config.ENABLE_IMAGE_GENERATION, + "enable_autocomplete_generation": app.state.config.ENABLE_AUTOCOMPLETE_GENERATION, "enable_community_sharing": app.state.config.ENABLE_COMMUNITY_SHARING, "enable_message_rating": app.state.config.ENABLE_MESSAGE_RATING, "enable_admin_export": ENABLE_ADMIN_EXPORT, "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS, + "enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, } if user is not None else {} diff --git a/backend/open_webui/models/chats.py b/backend/open_webui/models/chats.py index 73ff6c102d..9e0a5865e9 100644 --- a/backend/open_webui/models/chats.py +++ b/backend/open_webui/models/chats.py @@ -470,7 +470,7 @@ class ChatTable: try: with get_db() as db: # it is possible that the shared link was deleted. 
hence, - # we check if the chat is still shared by checkng if a chat with the share_id exists + # we check if the chat is still shared by checking if a chat with the share_id exists chat = db.query(Chat).filter_by(share_id=id).first() if chat: diff --git a/backend/open_webui/models/users.py b/backend/open_webui/models/users.py index 5c196281f7..605299528d 100644 --- a/backend/open_webui/models/users.py +++ b/backend/open_webui/models/users.py @@ -271,6 +271,24 @@ class UsersTable: except Exception: return None + def update_user_settings_by_id(self, id: str, updated: dict) -> Optional[UserModel]: + try: + with get_db() as db: + user_settings = db.query(User).filter_by(id=id).first().settings + + if user_settings is None: + user_settings = {} + + user_settings.update(updated) + + db.query(User).filter_by(id=id).update({"settings": user_settings}) + db.commit() + + user = db.query(User).filter_by(id=id).first() + return UserModel.model_validate(user) + except Exception: + return None + def delete_user_by_id(self, id: str) -> bool: try: # Remove User from Groups diff --git a/backend/open_webui/retrieval/vector/dbs/opensearch.py b/backend/open_webui/retrieval/vector/dbs/opensearch.py index b3d8b5eb8a..b8186b3f93 100644 --- a/backend/open_webui/retrieval/vector/dbs/opensearch.py +++ b/backend/open_webui/retrieval/vector/dbs/opensearch.py @@ -113,6 +113,34 @@ class OpenSearchClient: return self._result_to_search_result(result) + def query( + self, collection_name: str, filter: dict, limit: Optional[int] = None + ) -> Optional[GetResult]: + if not self.has_collection(collection_name): + return None + + query_body = { + "query": {"bool": {"filter": []}}, + "_source": ["text", "metadata"], + } + + for field, value in filter.items(): + query_body["query"]["bool"]["filter"].append({"term": {field: value}}) + + size = limit if limit else 10 + + try: + result = self.client.search( + index=f"{self.index_prefix}_{collection_name}", + body=query_body, + size=size, + ) + + return 
self._result_to_get_result(result) + + except Exception as e: + return None + def get_or_create_index(self, index_name: str, dimension: int): if not self.has_index(index_name): self._create_index(index_name, dimension) diff --git a/backend/open_webui/retrieval/web/bocha.py b/backend/open_webui/retrieval/web/bocha.py new file mode 100644 index 0000000000..f26da36f84 --- /dev/null +++ b/backend/open_webui/retrieval/web/bocha.py @@ -0,0 +1,65 @@ +import logging +from typing import Optional + +import requests +import json +from open_webui.retrieval.web.main import SearchResult, get_filtered_results +from open_webui.env import SRC_LOG_LEVELS + +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["RAG"]) + + +def _parse_response(response): + result = {} + if "data" in response: + data = response["data"] + if "webPages" in data: + webPages = data["webPages"] + if "value" in webPages: + result["webpage"] = [ + { + "id": item.get("id", ""), + "name": item.get("name", ""), + "url": item.get("url", ""), + "snippet": item.get("snippet", ""), + "summary": item.get("summary", ""), + "siteName": item.get("siteName", ""), + "siteIcon": item.get("siteIcon", ""), + "datePublished": item.get("datePublished", "") + or item.get("dateLastCrawled", ""), + } + for item in webPages["value"] + ] + return result + + +def search_bocha( + api_key: str, query: str, count: int, filter_list: Optional[list[str]] = None +) -> list[SearchResult]: + """Search using Bocha's Search API and return the results as a list of SearchResult objects. 
+ + Args: + api_key (str): A Bocha Search API key + query (str): The query to search for + """ + url = "https://api.bochaai.com/v1/web-search?utm_source=ollama" + headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} + + payload = json.dumps( + {"query": query, "summary": True, "freshness": "noLimit", "count": count} + ) + + response = requests.post(url, headers=headers, data=payload, timeout=5) + response.raise_for_status() + results = _parse_response(response.json()) + log.debug(results) + if filter_list: + results = get_filtered_results(results, filter_list) + + return [ + SearchResult( + link=result["url"], title=result.get("name"), snippet=result.get("summary") + ) + for result in results.get("webpage", [])[:count] + ] diff --git a/backend/open_webui/retrieval/web/google_pse.py b/backend/open_webui/retrieval/web/google_pse.py index 2c51dd3c99..2d2b863b42 100644 --- a/backend/open_webui/retrieval/web/google_pse.py +++ b/backend/open_webui/retrieval/web/google_pse.py @@ -17,34 +17,53 @@ def search_google_pse( filter_list: Optional[list[str]] = None, ) -> list[SearchResult]: """Search using Google's Programmable Search Engine API and return the results as a list of SearchResult objects. + Handles pagination for counts greater than 10. Args: api_key (str): A Programmable Search Engine API key search_engine_id (str): A Programmable Search Engine ID query (str): The query to search for + count (int): The number of results to return (max 100, as PSE max results per query is 10 and max page is 10) + filter_list (Optional[list[str]], optional): A list of keywords to filter out from results. Defaults to None. + + Returns: + list[SearchResult]: A list of SearchResult objects. 
""" url = "https://www.googleapis.com/customsearch/v1" - headers = {"Content-Type": "application/json"} - params = { - "cx": search_engine_id, - "q": query, - "key": api_key, - "num": count, - } + all_results = [] + start_index = 1 # Google PSE start parameter is 1-based - response = requests.request("GET", url, headers=headers, params=params) - response.raise_for_status() + while count > 0: + num_results_this_page = min(count, 10) # Google PSE max results per page is 10 + params = { + "cx": search_engine_id, + "q": query, + "key": api_key, + "num": num_results_this_page, + "start": start_index, + } + response = requests.request("GET", url, headers=headers, params=params) + response.raise_for_status() + json_response = response.json() + results = json_response.get("items", []) + if results: # check if results are returned. If not, no more pages to fetch. + all_results.extend(results) + count -= len( + results + ) # Decrement count by the number of results fetched in this page. + start_index += 10 # Increment start index for the next page + else: + break # No more results from Google PSE, break the loop - json_response = response.json() - results = json_response.get("items", []) if filter_list: - results = get_filtered_results(results, filter_list) + all_results = get_filtered_results(all_results, filter_list) + return [ SearchResult( link=result["link"], title=result.get("title"), snippet=result.get("snippet"), ) - for result in results + for result in all_results ] diff --git a/backend/open_webui/retrieval/web/jina_search.py b/backend/open_webui/retrieval/web/jina_search.py index 3de6c18077..a87293db5c 100644 --- a/backend/open_webui/retrieval/web/jina_search.py +++ b/backend/open_webui/retrieval/web/jina_search.py @@ -20,14 +20,23 @@ def search_jina(api_key: str, query: str, count: int) -> list[SearchResult]: list[SearchResult]: A list of search results """ jina_search_endpoint = "https://s.jina.ai/" - headers = {"Accept": "application/json", "Authorization": 
f"Bearer {api_key}"} - url = str(URL(jina_search_endpoint + query)) - response = requests.get(url, headers=headers) + + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": api_key, + "X-Retain-Images": "none", + } + + payload = {"q": query, "count": count if count <= 10 else 10} + + url = str(URL(jina_search_endpoint)) + response = requests.post(url, headers=headers, json=payload) response.raise_for_status() data = response.json() results = [] - for result in data["data"][:count]: + for result in data["data"]: results.append( SearchResult( link=result["url"], diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index c1b15772bd..e2d05ba908 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -11,6 +11,7 @@ from pydub.silence import split_on_silence import aiohttp import aiofiles import requests +import mimetypes from fastapi import ( Depends, @@ -138,6 +139,7 @@ class STTConfigForm(BaseModel): ENGINE: str MODEL: str WHISPER_MODEL: str + DEEPGRAM_API_KEY: str class AudioConfigUpdateForm(BaseModel): @@ -165,6 +167,7 @@ async def get_audio_config(request: Request, user=Depends(get_admin_user)): "ENGINE": request.app.state.config.STT_ENGINE, "MODEL": request.app.state.config.STT_MODEL, "WHISPER_MODEL": request.app.state.config.WHISPER_MODEL, + "DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY, }, } @@ -190,6 +193,7 @@ async def update_audio_config( request.app.state.config.STT_ENGINE = form_data.stt.ENGINE request.app.state.config.STT_MODEL = form_data.stt.MODEL request.app.state.config.WHISPER_MODEL = form_data.stt.WHISPER_MODEL + request.app.state.config.DEEPGRAM_API_KEY = form_data.stt.DEEPGRAM_API_KEY if request.app.state.config.STT_ENGINE == "": request.app.state.faster_whisper_model = set_faster_whisper_model( @@ -214,6 +218,7 @@ async def update_audio_config( "ENGINE": request.app.state.config.STT_ENGINE, "MODEL": 
request.app.state.config.STT_MODEL, "WHISPER_MODEL": request.app.state.config.WHISPER_MODEL, + "DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY, }, } @@ -521,6 +526,69 @@ def transcribe(request: Request, file_path): raise Exception(detail if detail else "Open WebUI: Server Connection Error") + elif request.app.state.config.STT_ENGINE == "deepgram": + try: + # Determine the MIME type of the file + mime, _ = mimetypes.guess_type(file_path) + if not mime: + mime = "audio/wav" # fallback to wav if undetectable + + # Read the audio file + with open(file_path, "rb") as f: + file_data = f.read() + + # Build headers and parameters + headers = { + "Authorization": f"Token {request.app.state.config.DEEPGRAM_API_KEY}", + "Content-Type": mime, + } + + # Add model if specified + params = {} + if request.app.state.config.STT_MODEL: + params["model"] = request.app.state.config.STT_MODEL + + # Make request to Deepgram API + r = requests.post( + "https://api.deepgram.com/v1/listen", + headers=headers, + params=params, + data=file_data, + ) + r.raise_for_status() + response_data = r.json() + + # Extract transcript from Deepgram response + try: + transcript = response_data["results"]["channels"][0]["alternatives"][ + 0 + ].get("transcript", "") + except (KeyError, IndexError) as e: + log.error(f"Malformed response from Deepgram: {str(e)}") + raise Exception( + "Failed to parse Deepgram response - unexpected response format" + ) + data = {"text": transcript.strip()} + + # Save transcript + transcript_file = f"{file_dir}/{id}.json" + with open(transcript_file, "w") as f: + json.dump(data, f) + + return data + + except Exception as e: + log.exception(e) + detail = None + if r is not None: + try: + res = r.json() + if "error" in res: + detail = f"External: {res['error'].get('message', '')}" + except Exception: + detail = f"External: {e}" + raise Exception(detail if detail else "Open WebUI: Server Connection Error") + def compress_audio(file_path): if 
os.path.getsize(file_path) > MAX_FILE_SIZE: diff --git a/backend/open_webui/routers/configs.py b/backend/open_webui/routers/configs.py index ef6c4d8c1f..016075234a 100644 --- a/backend/open_webui/routers/configs.py +++ b/backend/open_webui/routers/configs.py @@ -36,6 +36,98 @@ async def export_config(user=Depends(get_admin_user)): return get_config() +############################ +# Direct Connections Config +############################ + + +class DirectConnectionsConfigForm(BaseModel): + ENABLE_DIRECT_CONNECTIONS: bool + + +@router.get("/direct_connections", response_model=DirectConnectionsConfigForm) +async def get_direct_connections_config(request: Request, user=Depends(get_admin_user)): + return { + "ENABLE_DIRECT_CONNECTIONS": request.app.state.config.ENABLE_DIRECT_CONNECTIONS, + } + + +@router.post("/direct_connections", response_model=DirectConnectionsConfigForm) +async def set_direct_connections_config( + request: Request, + form_data: DirectConnectionsConfigForm, + user=Depends(get_admin_user), +): + request.app.state.config.ENABLE_DIRECT_CONNECTIONS = ( + form_data.ENABLE_DIRECT_CONNECTIONS + ) + return { + "ENABLE_DIRECT_CONNECTIONS": request.app.state.config.ENABLE_DIRECT_CONNECTIONS, + } + + +############################ +# CodeInterpreterConfig +############################ +class CodeInterpreterConfigForm(BaseModel): + ENABLE_CODE_INTERPRETER: bool + CODE_INTERPRETER_ENGINE: str + CODE_INTERPRETER_PROMPT_TEMPLATE: Optional[str] + CODE_INTERPRETER_JUPYTER_URL: Optional[str] + CODE_INTERPRETER_JUPYTER_AUTH: Optional[str] + CODE_INTERPRETER_JUPYTER_AUTH_TOKEN: Optional[str] + CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD: Optional[str] + + +@router.get("/code_interpreter", response_model=CodeInterpreterConfigForm) +async def get_code_interpreter_config(request: Request, user=Depends(get_admin_user)): + return { + "ENABLE_CODE_INTERPRETER": request.app.state.config.ENABLE_CODE_INTERPRETER, + "CODE_INTERPRETER_ENGINE": 
request.app.state.config.CODE_INTERPRETER_ENGINE, + "CODE_INTERPRETER_PROMPT_TEMPLATE": request.app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE, + "CODE_INTERPRETER_JUPYTER_URL": request.app.state.config.CODE_INTERPRETER_JUPYTER_URL, + "CODE_INTERPRETER_JUPYTER_AUTH": request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH, + "CODE_INTERPRETER_JUPYTER_AUTH_TOKEN": request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN, + "CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD": request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD, + } + + +@router.post("/code_interpreter", response_model=CodeInterpreterConfigForm) +async def set_code_interpreter_config( + request: Request, form_data: CodeInterpreterConfigForm, user=Depends(get_admin_user) +): + request.app.state.config.ENABLE_CODE_INTERPRETER = form_data.ENABLE_CODE_INTERPRETER + request.app.state.config.CODE_INTERPRETER_ENGINE = form_data.CODE_INTERPRETER_ENGINE + request.app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE = ( + form_data.CODE_INTERPRETER_PROMPT_TEMPLATE + ) + + request.app.state.config.CODE_INTERPRETER_JUPYTER_URL = ( + form_data.CODE_INTERPRETER_JUPYTER_URL + ) + + request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH = ( + form_data.CODE_INTERPRETER_JUPYTER_AUTH + ) + + request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN = ( + form_data.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN + ) + request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD = ( + form_data.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD + ) + + return { + "ENABLE_CODE_INTERPRETER": request.app.state.config.ENABLE_CODE_INTERPRETER, + "CODE_INTERPRETER_ENGINE": request.app.state.config.CODE_INTERPRETER_ENGINE, + "CODE_INTERPRETER_PROMPT_TEMPLATE": request.app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE, + "CODE_INTERPRETER_JUPYTER_URL": request.app.state.config.CODE_INTERPRETER_JUPYTER_URL, + "CODE_INTERPRETER_JUPYTER_AUTH": request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH, + "CODE_INTERPRETER_JUPYTER_AUTH_TOKEN": 
request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN, + "CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD": request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD, + } + + ############################ # SetDefaultModels ############################ diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py index 7160c2e86e..0513212571 100644 --- a/backend/open_webui/routers/files.py +++ b/backend/open_webui/routers/files.py @@ -3,30 +3,22 @@ import os import uuid from pathlib import Path from typing import Optional -from pydantic import BaseModel -import mimetypes from urllib.parse import quote -from open_webui.storage.provider import Storage - +from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile, status +from fastapi.responses import FileResponse, StreamingResponse +from open_webui.constants import ERROR_MESSAGES +from open_webui.env import SRC_LOG_LEVELS from open_webui.models.files import ( FileForm, FileModel, FileModelResponse, Files, ) -from open_webui.routers.retrieval import process_file, ProcessFileForm - -from open_webui.config import UPLOAD_DIR -from open_webui.env import SRC_LOG_LEVELS -from open_webui.constants import ERROR_MESSAGES - - -from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status, Request -from fastapi.responses import FileResponse, StreamingResponse - - +from open_webui.routers.retrieval import ProcessFileForm, process_file +from open_webui.storage.provider import Storage from open_webui.utils.auth import get_admin_user, get_verified_user +from pydantic import BaseModel log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MODELS"]) @@ -41,7 +33,10 @@ router = APIRouter() @router.post("/", response_model=FileModelResponse) def upload_file( - request: Request, file: UploadFile = File(...), user=Depends(get_verified_user) + request: Request, + file: UploadFile = File(...), + user=Depends(get_verified_user), + file_metadata: dict = {}, ): 
log.info(f"file.content_type: {file.content_type}") try: @@ -65,6 +60,7 @@ def upload_file( "name": name, "content_type": file.content_type, "size": len(contents), + "data": file_metadata, }, } ), @@ -126,7 +122,7 @@ async def delete_all_files(user=Depends(get_admin_user)): Storage.delete_all_files() except Exception as e: log.exception(e) - log.error(f"Error deleting files") + log.error("Error deleting files") raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT("Error deleting files"), @@ -248,7 +244,7 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)): ) except Exception as e: log.exception(e) - log.error(f"Error getting file content") + log.error("Error getting file content") raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT("Error getting file content"), @@ -279,7 +275,7 @@ async def get_html_file_content_by_id(id: str, user=Depends(get_verified_user)): ) except Exception as e: log.exception(e) - log.error(f"Error getting file content") + log.error("Error getting file content") raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT("Error getting file content"), @@ -355,7 +351,7 @@ async def delete_file_by_id(id: str, user=Depends(get_verified_user)): Storage.delete_file(file.path) except Exception as e: log.exception(e) - log.error(f"Error deleting files") + log.error("Error deleting files") raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT("Error deleting files"), diff --git a/backend/open_webui/routers/images.py b/backend/open_webui/routers/images.py index 7afd9d106d..4046773dea 100644 --- a/backend/open_webui/routers/images.py +++ b/backend/open_webui/routers/images.py @@ -1,32 +1,26 @@ import asyncio import base64 +import io import json import logging import mimetypes import re -import uuid from pathlib import Path from typing import Optional import requests - - -from 
fastapi import Depends, FastAPI, HTTPException, Request, APIRouter -from fastapi.middleware.cors import CORSMiddleware -from pydantic import BaseModel - - +from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile from open_webui.config import CACHE_DIR from open_webui.constants import ERROR_MESSAGES -from open_webui.env import ENV, SRC_LOG_LEVELS, ENABLE_FORWARD_USER_INFO_HEADERS - +from open_webui.env import ENABLE_FORWARD_USER_INFO_HEADERS, SRC_LOG_LEVELS +from open_webui.routers.files import upload_file from open_webui.utils.auth import get_admin_user, get_verified_user from open_webui.utils.images.comfyui import ( ComfyUIGenerateImageForm, ComfyUIWorkflow, comfyui_generate_image, ) - +from pydantic import BaseModel log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["IMAGES"]) @@ -271,7 +265,6 @@ async def get_image_config(request: Request, user=Depends(get_admin_user)): async def update_image_config( request: Request, form_data: ImageConfigForm, user=Depends(get_admin_user) ): - set_image_model(request, form_data.MODEL) pattern = r"^\d+x\d+$" @@ -383,40 +376,22 @@ class GenerateImageForm(BaseModel): negative_prompt: Optional[str] = None -def save_b64_image(b64_str): +def load_b64_image_data(b64_str): try: - image_id = str(uuid.uuid4()) - if "," in b64_str: header, encoded = b64_str.split(",", 1) mime_type = header.split(";")[0] - img_data = base64.b64decode(encoded) - image_format = mimetypes.guess_extension(mime_type) - - image_filename = f"{image_id}{image_format}" - file_path = IMAGE_CACHE_DIR / f"{image_filename}" - with open(file_path, "wb") as f: - f.write(img_data) - return image_filename else: - image_filename = f"{image_id}.png" - file_path = IMAGE_CACHE_DIR.joinpath(image_filename) - + mime_type = "image/png" img_data = base64.b64decode(b64_str) - - # Write the image data to a file - with open(file_path, "wb") as f: - f.write(img_data) - return image_filename - + return img_data, mime_type except Exception as e: - 
log.exception(f"Error saving image: {e}") + log.exception(f"Error loading image data: {e}") return None -def save_url_image(url, headers=None): - image_id = str(uuid.uuid4()) +def load_url_image_data(url, headers=None): try: if headers: r = requests.get(url, headers=headers) @@ -426,18 +401,7 @@ def save_url_image(url, headers=None): r.raise_for_status() if r.headers["content-type"].split("/")[0] == "image": mime_type = r.headers["content-type"] - image_format = mimetypes.guess_extension(mime_type) - - if not image_format: - raise ValueError("Could not determine image type from MIME type") - - image_filename = f"{image_id}{image_format}" - - file_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}") - with open(file_path, "wb") as image_file: - for chunk in r.iter_content(chunk_size=8192): - image_file.write(chunk) - return image_filename + return r.content, mime_type else: log.error("Url does not point to an image.") return None @@ -447,6 +411,20 @@ def save_url_image(url, headers=None): return None +def upload_image(request, image_metadata, image_data, content_type, user): + image_format = mimetypes.guess_extension(content_type) + file = UploadFile( + file=io.BytesIO(image_data), + filename=f"generated-image{image_format}", # will be converted to a unique ID on upload_file + headers={ + "content-type": content_type, + }, + ) + file_item = upload_file(request, file, user, file_metadata=image_metadata) + url = request.app.url_path_for("get_file_content_by_id", id=file_item.id) + return url + + @router.post("/generations") async def image_generations( request: Request, @@ -500,13 +478,9 @@ async def image_generations( images = [] for image in res["data"]: - image_filename = save_b64_image(image["b64_json"]) - images.append({"url": f"/cache/image/generations/{image_filename}"}) - file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json") - - with open(file_body_path, "w") as f: - json.dump(data, f) - + image_data, content_type = 
load_b64_image_data(image["b64_json"]) + url = upload_image(request, data, image_data, content_type, user) + images.append({"url": url}) return images elif request.app.state.config.IMAGE_GENERATION_ENGINE == "comfyui": @@ -552,14 +526,15 @@ async def image_generations( "Authorization": f"Bearer {request.app.state.config.COMFYUI_API_KEY}" } - image_filename = save_url_image(image["url"], headers) - images.append({"url": f"/cache/image/generations/{image_filename}"}) - file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json") - - with open(file_body_path, "w") as f: - json.dump(form_data.model_dump(exclude_none=True), f) - - log.debug(f"images: {images}") + image_data, content_type = load_url_image_data(image["url"], headers) + url = upload_image( + request, + form_data.model_dump(exclude_none=True), + image_data, + content_type, + user, + ) + images.append({"url": url}) return images elif ( request.app.state.config.IMAGE_GENERATION_ENGINE == "automatic1111" @@ -604,13 +579,15 @@ async def image_generations( images = [] for image in res["images"]: - image_filename = save_b64_image(image) - images.append({"url": f"/cache/image/generations/{image_filename}"}) - file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_filename}.json") - - with open(file_body_path, "w") as f: - json.dump({**data, "info": res["info"]}, f) - + image_data, content_type = load_b64_image_data(image) + url = upload_image( + request, + {**data, "info": res["info"]}, + image_data, + content_type, + user, + ) + images.append({"url": url}) return images except Exception as e: error = e diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index 2ab06eb95e..64373c616c 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -11,10 +11,8 @@ import re import time from typing import Optional, Union from urllib.parse import urlparse - import aiohttp from aiocache import cached - import requests from fastapi import ( @@ -990,6 
+988,8 @@ async def generate_chat_completion( ) payload = {**form_data.model_dump(exclude_none=True)} + if "metadata" in payload: + del payload["metadata"] model_id = payload["model"] model_info = Models.get_model_by_id(model_id) @@ -1408,9 +1408,10 @@ async def download_model( return None +# TODO: Progress bar does not reflect size & duration of upload. @router.post("/models/upload") @router.post("/models/upload/{url_idx}") -def upload_model( +async def upload_model( request: Request, file: UploadFile = File(...), url_idx: Optional[int] = None, @@ -1419,59 +1420,85 @@ def upload_model( if url_idx is None: url_idx = 0 ollama_url = request.app.state.config.OLLAMA_BASE_URLS[url_idx] + file_path = os.path.join(UPLOAD_DIR, file.filename) + os.makedirs(UPLOAD_DIR, exist_ok=True) - file_path = f"{UPLOAD_DIR}/{file.filename}" + # --- P1: save file locally --- + chunk_size = 1024 * 1024 * 2 # 2 MB chunks + with open(file_path, "wb") as out_f: + while True: + chunk = file.file.read(chunk_size) + # log.info(f"Chunk: {str(chunk)}") # DEBUG + if not chunk: + break + out_f.write(chunk) - # Save file in chunks - with open(file_path, "wb+") as f: - for chunk in file.file: - f.write(chunk) - - def file_process_stream(): + async def file_process_stream(): nonlocal ollama_url total_size = os.path.getsize(file_path) - chunk_size = 1024 * 1024 + log.info(f"Total Model Size: {str(total_size)}") # DEBUG + + # --- P2: SSE progress + calculate sha256 hash --- + file_hash = calculate_sha256(file_path, chunk_size) + log.info(f"Model Hash: {str(file_hash)}") # DEBUG try: with open(file_path, "rb") as f: - total = 0 - done = False - - while not done: - chunk = f.read(chunk_size) - if not chunk: - done = True - continue - - total += len(chunk) - progress = round((total / total_size) * 100, 2) - - res = { + bytes_read = 0 + while chunk := f.read(chunk_size): + bytes_read += len(chunk) + progress = round(bytes_read / total_size * 100, 2) + data_msg = { "progress": progress, "total": total_size, 
- "completed": total, + "completed": bytes_read, } - yield f"data: {json.dumps(res)}\n\n" + yield f"data: {json.dumps(data_msg)}\n\n" - if done: - f.seek(0) - hashed = calculate_sha256(f) - f.seek(0) + # --- P3: Upload to ollama /api/blobs --- + with open(file_path, "rb") as f: + url = f"{ollama_url}/api/blobs/sha256:{file_hash}" + response = requests.post(url, data=f) - url = f"{ollama_url}/api/blobs/sha256:{hashed}" - response = requests.post(url, data=f) + if response.ok: + log.info(f"Uploaded to /api/blobs") # DEBUG + # Remove local file + os.remove(file_path) - if response.ok: - res = { - "done": done, - "blob": f"sha256:{hashed}", - "name": file.filename, - } - os.remove(file_path) - yield f"data: {json.dumps(res)}\n\n" - else: - raise Exception( - "Ollama: Could not create blob, Please try again." - ) + # Create model in ollama + model_name, ext = os.path.splitext(file.filename) + log.info(f"Created Model: {model_name}") # DEBUG + + create_payload = { + "model": model_name, + # Reference the file by its original name => the uploaded blob's digest + "files": {file.filename: f"sha256:{file_hash}"}, + } + log.info(f"Model Payload: {create_payload}") # DEBUG + + # Call ollama /api/create + # https://github.com/ollama/ollama/blob/main/docs/api.md#create-a-model + create_resp = requests.post( + url=f"{ollama_url}/api/create", + headers={"Content-Type": "application/json"}, + data=json.dumps(create_payload), + ) + + if create_resp.ok: + log.info(f"API SUCCESS!") # DEBUG + done_msg = { + "done": True, + "blob": f"sha256:{file_hash}", + "name": file.filename, + "model_created": model_name, + } + yield f"data: {json.dumps(done_msg)}\n\n" + else: + raise Exception( + f"Failed to create model in Ollama. 
{create_resp.text}" + ) + + else: + raise Exception("Ollama: Could not create blob, Please try again.") except Exception as e: res = {"error": str(e)} diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index d18f2a8ffc..afda362373 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -75,9 +75,9 @@ async def cleanup_response( await session.close() -def openai_o1_handler(payload): +def openai_o1_o3_handler(payload): """ - Handle O1 specific parameters + Handle o1, o3 specific parameters """ if "max_tokens" in payload: # Remove "max_tokens" from the payload @@ -621,10 +621,10 @@ async def generate_chat_completion( url = request.app.state.config.OPENAI_API_BASE_URLS[idx] key = request.app.state.config.OPENAI_API_KEYS[idx] - # Fix: O1 does not support the "max_tokens" parameter, Modify "max_tokens" to "max_completion_tokens" - is_o1 = payload["model"].lower().startswith("o1-") - if is_o1: - payload = openai_o1_handler(payload) + # Fix: o1,o3 does not support the "max_tokens" parameter, Modify "max_tokens" to "max_completion_tokens" + is_o1_o3 = payload["model"].lower().startswith(("o1", "o3-")) + if is_o1_o3: + payload = openai_o1_o3_handler(payload) elif "api.openai.com" not in url: # Remove "max_completion_tokens" from the payload for backward compatibility if "max_completion_tokens" in payload: diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 77f04a4be5..e4bab52898 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -45,6 +45,7 @@ from open_webui.retrieval.web.utils import get_web_loader from open_webui.retrieval.web.brave import search_brave from open_webui.retrieval.web.kagi import search_kagi from open_webui.retrieval.web.mojeek import search_mojeek +from open_webui.retrieval.web.bocha import search_bocha from open_webui.retrieval.web.duckduckgo import search_duckduckgo from 
open_webui.retrieval.web.google_pse import search_google_pse from open_webui.retrieval.web.jina_search import search_jina @@ -379,6 +380,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "brave_search_api_key": request.app.state.config.BRAVE_SEARCH_API_KEY, "kagi_search_api_key": request.app.state.config.KAGI_SEARCH_API_KEY, "mojeek_search_api_key": request.app.state.config.MOJEEK_SEARCH_API_KEY, + "bocha_search_api_key": request.app.state.config.BOCHA_SEARCH_API_KEY, "serpstack_api_key": request.app.state.config.SERPSTACK_API_KEY, "serpstack_https": request.app.state.config.SERPSTACK_HTTPS, "serper_api_key": request.app.state.config.SERPER_API_KEY, @@ -392,6 +394,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "exa_api_key": request.app.state.config.EXA_API_KEY, "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS, + "domain_filter_list": request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, }, }, } @@ -428,6 +431,7 @@ class WebSearchConfig(BaseModel): brave_search_api_key: Optional[str] = None kagi_search_api_key: Optional[str] = None mojeek_search_api_key: Optional[str] = None + bocha_search_api_key: Optional[str] = None serpstack_api_key: Optional[str] = None serpstack_https: Optional[bool] = None serper_api_key: Optional[str] = None @@ -441,6 +445,7 @@ class WebSearchConfig(BaseModel): exa_api_key: Optional[str] = None result_count: Optional[int] = None concurrent_requests: Optional[int] = None + domain_filter_list: Optional[List[str]] = [] class WebConfig(BaseModel): @@ -523,6 +528,9 @@ async def update_rag_config( request.app.state.config.MOJEEK_SEARCH_API_KEY = ( form_data.web.search.mojeek_search_api_key ) + request.app.state.config.BOCHA_SEARCH_API_KEY = ( + form_data.web.search.bocha_search_api_key + ) request.app.state.config.SERPSTACK_API_KEY = ( form_data.web.search.serpstack_api_key ) @@ 
-553,6 +561,9 @@ async def update_rag_config( request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = ( form_data.web.search.concurrent_requests ) + request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = ( + form_data.web.search.domain_filter_list + ) return { "status": True, @@ -586,6 +597,7 @@ async def update_rag_config( "brave_search_api_key": request.app.state.config.BRAVE_SEARCH_API_KEY, "kagi_search_api_key": request.app.state.config.KAGI_SEARCH_API_KEY, "mojeek_search_api_key": request.app.state.config.MOJEEK_SEARCH_API_KEY, + "bocha_search_api_key": request.app.state.config.BOCHA_SEARCH_API_KEY, "serpstack_api_key": request.app.state.config.SERPSTACK_API_KEY, "serpstack_https": request.app.state.config.SERPSTACK_HTTPS, "serper_api_key": request.app.state.config.SERPER_API_KEY, @@ -599,6 +611,7 @@ async def update_rag_config( "exa_api_key": request.app.state.config.EXA_API_KEY, "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS, + "domain_filter_list": request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, }, }, } @@ -1107,6 +1120,7 @@ def search_web(request: Request, engine: str, query: str) -> list[SearchResult]: - BRAVE_SEARCH_API_KEY - KAGI_SEARCH_API_KEY - MOJEEK_SEARCH_API_KEY + - BOCHA_SEARCH_API_KEY - SERPSTACK_API_KEY - SERPER_API_KEY - SERPLY_API_KEY @@ -1174,6 +1188,16 @@ def search_web(request: Request, engine: str, query: str) -> list[SearchResult]: ) else: raise Exception("No MOJEEK_SEARCH_API_KEY found in environment variables") + elif engine == "bocha": + if request.app.state.config.BOCHA_SEARCH_API_KEY: + return search_bocha( + request.app.state.config.BOCHA_SEARCH_API_KEY, + query, + request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, + request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, + ) + else: + raise Exception("No BOCHA_SEARCH_API_KEY found in environment variables") elif engine == "serpstack": if 
request.app.state.config.SERPSTACK_API_KEY: return search_serpstack( diff --git a/backend/open_webui/routers/tasks.py b/backend/open_webui/routers/tasks.py index f56a0232dd..91ec8e9723 100644 --- a/backend/open_webui/routers/tasks.py +++ b/backend/open_webui/routers/tasks.py @@ -139,7 +139,12 @@ async def update_task_config( async def generate_title( request: Request, form_data: dict, user=Depends(get_verified_user) ): - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -198,6 +203,7 @@ async def generate_title( } ), "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "task": str(TASKS.TITLE_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None), @@ -225,7 +231,12 @@ async def generate_chat_tags( content={"detail": "Tags generation is disabled"}, ) - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -261,6 +272,7 @@ async def generate_chat_tags( "messages": [{"role": "user", "content": content}], "stream": False, "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "task": str(TASKS.TAGS_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None), @@ -281,7 +293,12 @@ async def generate_chat_tags( async def generate_image_prompt( request: Request, form_data: dict, user=Depends(get_verified_user) ): - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + 
else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -321,6 +338,7 @@ async def generate_image_prompt( "messages": [{"role": "user", "content": content}], "stream": False, "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "task": str(TASKS.IMAGE_PROMPT_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None), @@ -356,7 +374,12 @@ async def generate_queries( detail=f"Query generation is disabled", ) - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -392,6 +415,7 @@ async def generate_queries( "messages": [{"role": "user", "content": content}], "stream": False, "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "task": str(TASKS.QUERY_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None), @@ -431,7 +455,12 @@ async def generate_autocompletion( detail=f"Input prompt exceeds maximum length of {request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH}", ) - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -467,6 +496,7 @@ async def generate_autocompletion( "messages": [{"role": "user", "content": content}], "stream": False, "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "task": str(TASKS.AUTOCOMPLETE_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None), @@ -488,7 +518,12 @@ async def generate_emoji( request: Request, form_data: dict, 
user=Depends(get_verified_user) ): - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -531,7 +566,11 @@ async def generate_emoji( } ), "chat_id": form_data.get("chat_id", None), - "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data}, + "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), + "task": str(TASKS.EMOJI_GENERATION), + "task_body": form_data, + }, } try: @@ -548,7 +587,13 @@ async def generate_moa_response( request: Request, form_data: dict, user=Depends(get_verified_user) ): - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS + model_id = form_data["model"] if model_id not in models: @@ -581,6 +626,7 @@ async def generate_moa_response( "messages": [{"role": "user", "content": content}], "stream": form_data.get("stream", False), "metadata": { + **(request.state.metadata if hasattr(request.state, "metadata") else {}), "chat_id": form_data.get("chat_id", None), "task": str(TASKS.MOA_RESPONSE_GENERATION), "task_body": form_data, diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index ddcaef7674..872212d3ce 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -153,7 +153,7 @@ async def get_user_settings_by_session_user(user=Depends(get_verified_user)): async def update_user_settings_by_session_user( form_data: UserSettings, user=Depends(get_verified_user) ): - user = Users.update_user_by_id(user.id, {"settings": form_data.model_dump()}) + user = Users.update_user_settings_by_id(user.id, form_data.model_dump()) if user: 
return user.settings else: diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py index 3788139eaa..6f59151227 100644 --- a/backend/open_webui/socket/main.py +++ b/backend/open_webui/socket/main.py @@ -279,8 +279,8 @@ def get_event_emitter(request_info): await sio.emit( "chat-events", { - "chat_id": request_info["chat_id"], - "message_id": request_info["message_id"], + "chat_id": request_info.get("chat_id", None), + "message_id": request_info.get("message_id", None), "data": event_data, }, to=session_id, @@ -329,8 +329,8 @@ def get_event_call(request_info): response = await sio.call( "chat-events", { - "chat_id": request_info["chat_id"], - "message_id": request_info["message_id"], + "chat_id": request_info.get("chat_id", None), + "message_id": request_info.get("message_id", None), "data": event_data, }, to=request_info["session_id"], diff --git a/backend/open_webui/storage/provider.py b/backend/open_webui/storage/provider.py index 0c0a8aacfc..b03cf0a7ec 100644 --- a/backend/open_webui/storage/provider.py +++ b/backend/open_webui/storage/provider.py @@ -10,6 +10,7 @@ from open_webui.config import ( S3_ACCESS_KEY_ID, S3_BUCKET_NAME, S3_ENDPOINT_URL, + S3_KEY_PREFIX, S3_REGION_NAME, S3_SECRET_ACCESS_KEY, GCS_BUCKET_NAME, @@ -93,15 +94,17 @@ class S3StorageProvider(StorageProvider): aws_secret_access_key=S3_SECRET_ACCESS_KEY, ) self.bucket_name = S3_BUCKET_NAME + self.key_prefix = S3_KEY_PREFIX if S3_KEY_PREFIX else "" def upload_file(self, file: BinaryIO, filename: str) -> Tuple[bytes, str]: """Handles uploading of the file to S3 storage.""" _, file_path = LocalStorageProvider.upload_file(file, filename) try: - self.s3_client.upload_file(file_path, self.bucket_name, filename) + s3_key = os.path.join(self.key_prefix, filename) + self.s3_client.upload_file(file_path, self.bucket_name, s3_key) return ( open(file_path, "rb").read(), - "s3://" + self.bucket_name + "/" + filename, + "s3://" + self.bucket_name + "/" + s3_key, ) except ClientError 
as e: raise RuntimeError(f"Error uploading file to S3: {e}") @@ -109,18 +112,18 @@ class S3StorageProvider(StorageProvider): def get_file(self, file_path: str) -> str: """Handles downloading of the file from S3 storage.""" try: - bucket_name, key = file_path.split("//")[1].split("/") - local_file_path = f"{UPLOAD_DIR}/{key}" - self.s3_client.download_file(bucket_name, key, local_file_path) + s3_key = self._extract_s3_key(file_path) + local_file_path = self._get_local_file_path(s3_key) + self.s3_client.download_file(self.bucket_name, s3_key, local_file_path) return local_file_path except ClientError as e: raise RuntimeError(f"Error downloading file from S3: {e}") def delete_file(self, file_path: str) -> None: """Handles deletion of the file from S3 storage.""" - filename = file_path.split("/")[-1] try: - self.s3_client.delete_object(Bucket=self.bucket_name, Key=filename) + s3_key = self._extract_s3_key(file_path) + self.s3_client.delete_object(Bucket=self.bucket_name, Key=s3_key) except ClientError as e: raise RuntimeError(f"Error deleting file from S3: {e}") @@ -133,6 +136,10 @@ class S3StorageProvider(StorageProvider): response = self.s3_client.list_objects_v2(Bucket=self.bucket_name) if "Contents" in response: for content in response["Contents"]: + # Skip objects that were not uploaded from open-webui in the first place + if not content["Key"].startswith(self.key_prefix): + continue + self.s3_client.delete_object( Bucket=self.bucket_name, Key=content["Key"] ) @@ -142,6 +149,13 @@ class S3StorageProvider(StorageProvider): # Always delete from local storage LocalStorageProvider.delete_all_files() + # The s3 key is the name assigned to an object. It excludes the bucket name, but includes the internal path and the file name. 
+ def _extract_s3_key(self, full_file_path: str) -> str: + return "/".join(full_file_path.split("//")[1].split("/")[1:]) + + def _get_local_file_path(self, s3_key: str) -> str: + return f"{UPLOAD_DIR}/{s3_key.split('/')[-1]}" + class GCSStorageProvider(StorageProvider): def __init__(self): diff --git a/backend/open_webui/utils/chat.py b/backend/open_webui/utils/chat.py index 0719f6af5b..253eaedfb9 100644 --- a/backend/open_webui/utils/chat.py +++ b/backend/open_webui/utils/chat.py @@ -7,14 +7,17 @@ from typing import Any, Optional import random import json import inspect +import uuid +import asyncio -from fastapi import Request -from starlette.responses import Response, StreamingResponse +from fastapi import Request, status +from starlette.responses import Response, StreamingResponse, JSONResponse from open_webui.models.users import UserModel from open_webui.socket.main import ( + sio, get_event_call, get_event_emitter, ) @@ -44,6 +47,10 @@ from open_webui.utils.response import ( convert_response_ollama_to_openai, convert_streaming_response_ollama_to_openai, ) +from open_webui.utils.filter import ( + get_sorted_filter_ids, + process_filter_functions, +) from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL, BYPASS_MODEL_ACCESS_CONTROL @@ -53,6 +60,101 @@ log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MAIN"]) +async def generate_direct_chat_completion( + request: Request, + form_data: dict, + user: Any, + models: dict, +): + print("generate_direct_chat_completion") + + metadata = form_data.pop("metadata", {}) + + user_id = metadata.get("user_id") + session_id = metadata.get("session_id") + request_id = str(uuid.uuid4()) # Generate a unique request ID + + event_caller = get_event_call(metadata) + + channel = f"{user_id}:{session_id}:{request_id}" + + if form_data.get("stream"): + q = asyncio.Queue() + + async def message_listener(sid, data): + """ + Handle received socket messages and push them into the queue. 
+ """ + await q.put(data) + + # Register the listener + sio.on(channel, message_listener) + + # Start processing chat completion in background + res = await event_caller( + { + "type": "request:chat:completion", + "data": { + "form_data": form_data, + "model": models[form_data["model"]], + "channel": channel, + "session_id": session_id, + }, + } + ) + + print("res", res) + + if res.get("status", False): + # Define a generator to stream responses + async def event_generator(): + nonlocal q + try: + while True: + data = await q.get() # Wait for new messages + if isinstance(data, dict): + if "done" in data and data["done"]: + break # Stop streaming when 'done' is received + + yield f"data: {json.dumps(data)}\n\n" + elif isinstance(data, str): + yield data + except Exception as e: + log.debug(f"Error in event generator: {e}") + pass + + # Define a background task to run the event generator + async def background(): + try: + del sio.handlers["/"][channel] + except Exception as e: + pass + + # Return the streaming response + return StreamingResponse( + event_generator(), media_type="text/event-stream", background=background + ) + else: + raise Exception(str(res)) + else: + res = await event_caller( + { + "type": "request:chat:completion", + "data": { + "form_data": form_data, + "model": models[form_data["model"]], + "channel": channel, + "session_id": session_id, + }, + } + ) + + if "error" in res: + raise Exception(res["error"]) + + return res + + async def generate_chat_completion( request: Request, form_data: dict, @@ -62,7 +164,16 @@ async def generate_chat_completion( if BYPASS_MODEL_ACCESS_CONTROL: bypass_filter = True - models = request.app.state.MODELS + if hasattr(request.state, "metadata"): + form_data["metadata"] = request.state.metadata + + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + log.debug(f"direct connection to model: {models}") + else: + models = 
request.app.state.MODELS model_id = form_data["model"] if model_id not in models: @@ -83,78 +194,90 @@ async def generate_chat_completion( except Exception as e: raise e - if model["owned_by"] == "arena": - model_ids = model.get("info", {}).get("meta", {}).get("model_ids") - filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode") - if model_ids and filter_mode == "exclude": - model_ids = [ - model["id"] - for model in list(request.app.state.MODELS.values()) - if model.get("owned_by") != "arena" and model["id"] not in model_ids - ] - - selected_model_id = None - if isinstance(model_ids, list) and model_ids: - selected_model_id = random.choice(model_ids) - else: - model_ids = [ - model["id"] - for model in list(request.app.state.MODELS.values()) - if model.get("owned_by") != "arena" - ] - selected_model_id = random.choice(model_ids) - - form_data["model"] = selected_model_id - - if form_data.get("stream") == True: - - async def stream_wrapper(stream): - yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n" - async for chunk in stream: - yield chunk - - response = await generate_chat_completion( - request, form_data, user, bypass_filter=True - ) - return StreamingResponse( - stream_wrapper(response.body_iterator), - media_type="text/event-stream", - background=response.background, - ) - else: - return { - **( - await generate_chat_completion( - request, form_data, user, bypass_filter=True - ) - ), - "selected_model_id": selected_model_id, - } - - if model.get("pipe"): - # Below does not require bypass_filter because this is the only route the uses this function and it is already bypassing the filter - return await generate_function_chat_completion( + if getattr(request.state, "direct", False): + return await generate_direct_chat_completion( request, form_data, user=user, models=models ) - if model["owned_by"] == "ollama": - # Using /ollama/api/chat endpoint - form_data = convert_payload_openai_to_ollama(form_data) - response = await 
generate_ollama_chat_completion( - request=request, form_data=form_data, user=user, bypass_filter=bypass_filter - ) - if form_data.get("stream"): - response.headers["content-type"] = "text/event-stream" - return StreamingResponse( - convert_streaming_response_ollama_to_openai(response), - headers=dict(response.headers), - background=response.background, - ) - else: - return convert_response_ollama_to_openai(response) + else: - return await generate_openai_chat_completion( - request=request, form_data=form_data, user=user, bypass_filter=bypass_filter - ) + if model["owned_by"] == "arena": + model_ids = model.get("info", {}).get("meta", {}).get("model_ids") + filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode") + if model_ids and filter_mode == "exclude": + model_ids = [ + model["id"] + for model in list(request.app.state.MODELS.values()) + if model.get("owned_by") != "arena" and model["id"] not in model_ids + ] + + selected_model_id = None + if isinstance(model_ids, list) and model_ids: + selected_model_id = random.choice(model_ids) + else: + model_ids = [ + model["id"] + for model in list(request.app.state.MODELS.values()) + if model.get("owned_by") != "arena" + ] + selected_model_id = random.choice(model_ids) + + form_data["model"] = selected_model_id + + if form_data.get("stream") == True: + + async def stream_wrapper(stream): + yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n" + async for chunk in stream: + yield chunk + + response = await generate_chat_completion( + request, form_data, user, bypass_filter=True + ) + return StreamingResponse( + stream_wrapper(response.body_iterator), + media_type="text/event-stream", + background=response.background, + ) + else: + return { + **( + await generate_chat_completion( + request, form_data, user, bypass_filter=True + ) + ), + "selected_model_id": selected_model_id, + } + + if model.get("pipe"): + # Below does not require bypass_filter because this is the only route the uses 
this function and it is already bypassing the filter + return await generate_function_chat_completion( + request, form_data, user=user, models=models + ) + if model["owned_by"] == "ollama": + # Using /ollama/api/chat endpoint + form_data = convert_payload_openai_to_ollama(form_data) + response = await generate_ollama_chat_completion( + request=request, + form_data=form_data, + user=user, + bypass_filter=bypass_filter, + ) + if form_data.get("stream"): + response.headers["content-type"] = "text/event-stream" + return StreamingResponse( + convert_streaming_response_ollama_to_openai(response), + headers=dict(response.headers), + background=response.background, + ) + else: + return convert_response_ollama_to_openai(response) + else: + return await generate_openai_chat_completion( + request=request, + form_data=form_data, + user=user, + bypass_filter=bypass_filter, + ) chat_completion = generate_chat_completion @@ -163,7 +286,13 @@ chat_completion = generate_chat_completion async def chat_completed(request: Request, form_data: dict, user: Any): if not request.app.state.MODELS: await get_all_models(request) - models = request.app.state.MODELS + + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS data = form_data model_id = data["model"] @@ -177,116 +306,38 @@ async def chat_completed(request: Request, form_data: dict, user: Any): except Exception as e: return Exception(f"Error: {e}") - __event_emitter__ = get_event_emitter( - { - "chat_id": data["chat_id"], - "message_id": data["id"], - "session_id": data["session_id"], - "user_id": user.id, - } - ) + metadata = { + "chat_id": data["chat_id"], + "message_id": data["id"], + "session_id": data["session_id"], + "user_id": user.id, + } - __event_call__ = get_event_call( - { - "chat_id": data["chat_id"], - "message_id": data["id"], - "session_id": data["session_id"], - "user_id": 
user.id, - } - ) + extra_params = { + "__event_emitter__": get_event_emitter(metadata), + "__event_call__": get_event_call(metadata), + "__user__": { + "id": user.id, + "email": user.email, + "name": user.name, + "role": user.role, + }, + "__metadata__": metadata, + "__request__": request, + "__model__": model, + } - def get_priority(function_id): - function = Functions.get_function_by_id(function_id) - if function is not None and hasattr(function, "valves"): - # TODO: Fix FunctionModel to include vavles - return (function.valves if function.valves else {}).get("priority", 0) - return 0 - - filter_ids = [function.id for function in Functions.get_global_filter_functions()] - if "info" in model and "meta" in model["info"]: - filter_ids.extend(model["info"]["meta"].get("filterIds", [])) - filter_ids = list(set(filter_ids)) - - enabled_filter_ids = [ - function.id - for function in Functions.get_functions_by_type("filter", active_only=True) - ] - filter_ids = [ - filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids - ] - - # Sort filter_ids by priority, using the get_priority function - filter_ids.sort(key=get_priority) - - for filter_id in filter_ids: - filter = Functions.get_function_by_id(filter_id) - if not filter: - continue - - if filter_id in request.app.state.FUNCTIONS: - function_module = request.app.state.FUNCTIONS[filter_id] - else: - function_module, _, _ = load_function_module_by_id(filter_id) - request.app.state.FUNCTIONS[filter_id] = function_module - - if hasattr(function_module, "valves") and hasattr(function_module, "Valves"): - valves = Functions.get_function_valves_by_id(filter_id) - function_module.valves = function_module.Valves( - **(valves if valves else {}) - ) - - if not hasattr(function_module, "outlet"): - continue - try: - outlet = function_module.outlet - - # Get the signature of the function - sig = inspect.signature(outlet) - params = {"body": data} - - # Extra parameters to be passed to the function - extra_params = 
{ - "__model__": model, - "__id__": filter_id, - "__event_emitter__": __event_emitter__, - "__event_call__": __event_call__, - "__request__": request, - } - - # Add extra params in contained in function signature - for key, value in extra_params.items(): - if key in sig.parameters: - params[key] = value - - if "__user__" in sig.parameters: - __user__ = { - "id": user.id, - "email": user.email, - "name": user.name, - "role": user.role, - } - - try: - if hasattr(function_module, "UserValves"): - __user__["valves"] = function_module.UserValves( - **Functions.get_user_valves_by_id_and_user_id( - filter_id, user.id - ) - ) - except Exception as e: - print(e) - - params = {**params, "__user__": __user__} - - if inspect.iscoroutinefunction(outlet): - data = await outlet(**params) - else: - data = outlet(**params) - - except Exception as e: - return Exception(f"Error: {e}") - - return data + try: + result, _ = await process_filter_functions( + request=request, + filter_ids=get_sorted_filter_ids(model), + filter_type="outlet", + form_data=data, + extra_params=extra_params, + ) + return result + except Exception as e: + return Exception(f"Error: {e}") async def chat_action(request: Request, action_id: str, form_data: dict, user: Any): @@ -301,7 +352,13 @@ async def chat_action(request: Request, action_id: str, form_data: dict, user: A if not request.app.state.MODELS: await get_all_models(request) - models = request.app.state.MODELS + + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS data = form_data model_id = data["model"] diff --git a/backend/open_webui/utils/code_interpreter.py b/backend/open_webui/utils/code_interpreter.py new file mode 100644 index 0000000000..0a74da9c77 --- /dev/null +++ b/backend/open_webui/utils/code_interpreter.py @@ -0,0 +1,148 @@ +import asyncio +import json +import uuid +import websockets +import requests 
+from urllib.parse import urljoin + + +async def execute_code_jupyter( + jupyter_url, code, token=None, password=None, timeout=10 +): + """ + Executes Python code in a Jupyter kernel. + Supports authentication with a token or password. + :param jupyter_url: Jupyter server URL (e.g., "http://localhost:8888") + :param code: Code to execute + :param token: Jupyter authentication token (optional) + :param password: Jupyter password (optional) + :param timeout: WebSocket timeout in seconds (default: 10s) + :return: Dictionary with stdout, stderr, and result + - Images are prefixed with "base64:image/png," and separated by newlines if multiple. + """ + session = requests.Session() # Maintain cookies + headers = {} # Headers for requests + + # Authenticate using password + if password and not token: + try: + login_url = urljoin(jupyter_url, "/login") + response = session.get(login_url) + response.raise_for_status() + xsrf_token = session.cookies.get("_xsrf") + if not xsrf_token: + raise ValueError("Failed to fetch _xsrf token") + + login_data = {"_xsrf": xsrf_token, "password": password} + login_response = session.post( + login_url, data=login_data, cookies=session.cookies + ) + login_response.raise_for_status() + headers["X-XSRFToken"] = xsrf_token + except Exception as e: + return { + "stdout": "", + "stderr": f"Authentication Error: {str(e)}", + "result": "", + } + + # Construct API URLs with authentication token if provided + params = f"?token={token}" if token else "" + kernel_url = urljoin(jupyter_url, f"/api/kernels{params}") + + try: + response = session.post(kernel_url, headers=headers, cookies=session.cookies) + response.raise_for_status() + kernel_id = response.json()["id"] + + websocket_url = urljoin( + jupyter_url.replace("http", "ws"), + f"/api/kernels/{kernel_id}/channels{params}", + ) + + ws_headers = {} + if password and not token: + ws_headers["X-XSRFToken"] = session.cookies.get("_xsrf") + cookies = {name: value for name, value in 
session.cookies.items()} + ws_headers["Cookie"] = "; ".join( + [f"{name}={value}" for name, value in cookies.items()] + ) + + async with websockets.connect( + websocket_url, additional_headers=ws_headers + ) as ws: + msg_id = str(uuid.uuid4()) + execute_request = { + "header": { + "msg_id": msg_id, + "msg_type": "execute_request", + "username": "user", + "session": str(uuid.uuid4()), + "date": "", + "version": "5.3", + }, + "parent_header": {}, + "metadata": {}, + "content": { + "code": code, + "silent": False, + "store_history": True, + "user_expressions": {}, + "allow_stdin": False, + "stop_on_error": True, + }, + "channel": "shell", + } + await ws.send(json.dumps(execute_request)) + + stdout, stderr, result = "", "", [] + + while True: + try: + message = await asyncio.wait_for(ws.recv(), timeout) + message_data = json.loads(message) + if message_data.get("parent_header", {}).get("msg_id") == msg_id: + msg_type = message_data.get("msg_type") + + if msg_type == "stream": + if message_data["content"]["name"] == "stdout": + stdout += message_data["content"]["text"] + elif message_data["content"]["name"] == "stderr": + stderr += message_data["content"]["text"] + + elif msg_type in ("execute_result", "display_data"): + data = message_data["content"]["data"] + if "image/png" in data: + result.append( + f"data:image/png;base64,{data['image/png']}" + ) + elif "text/plain" in data: + result.append(data["text/plain"]) + + elif msg_type == "error": + stderr += "\n".join(message_data["content"]["traceback"]) + + elif ( + msg_type == "status" + and message_data["content"]["execution_state"] == "idle" + ): + break + + except asyncio.TimeoutError: + stderr += "\nExecution timed out." 
+ break + + except Exception as e: + return {"stdout": "", "stderr": f"Error: {str(e)}", "result": ""} + + finally: + if kernel_id: + requests.delete( + f"{kernel_url}/{kernel_id}", headers=headers, cookies=session.cookies + ) + + return { + "stdout": stdout.strip(), + "stderr": stderr.strip(), + "result": "\n".join(result).strip() if result else "", + } diff --git a/backend/open_webui/utils/filter.py b/backend/open_webui/utils/filter.py new file mode 100644 index 0000000000..de51bd46e5 --- /dev/null +++ b/backend/open_webui/utils/filter.py @@ -0,0 +1,99 @@ +import inspect +from open_webui.utils.plugin import load_function_module_by_id +from open_webui.models.functions import Functions + + +def get_sorted_filter_ids(model): + def get_priority(function_id): + function = Functions.get_function_by_id(function_id) + if function is not None and hasattr(function, "valves"): + # TODO: Fix FunctionModel to include vavles + return (function.valves if function.valves else {}).get("priority", 0) + return 0 + + filter_ids = [function.id for function in Functions.get_global_filter_functions()] + if "info" in model and "meta" in model["info"]: + filter_ids.extend(model["info"]["meta"].get("filterIds", [])) + filter_ids = list(set(filter_ids)) + + enabled_filter_ids = [ + function.id + for function in Functions.get_functions_by_type("filter", active_only=True) + ] + + filter_ids = [fid for fid in filter_ids if fid in enabled_filter_ids] + filter_ids.sort(key=get_priority) + return filter_ids + + +async def process_filter_functions( + request, filter_ids, filter_type, form_data, extra_params +): + skip_files = None + + for filter_id in filter_ids: + filter = Functions.get_function_by_id(filter_id) + if not filter: + continue + + if filter_id in request.app.state.FUNCTIONS: + function_module = request.app.state.FUNCTIONS[filter_id] + else: + function_module, _, _ = load_function_module_by_id(filter_id) + request.app.state.FUNCTIONS[filter_id] = function_module + + # Check if the 
function has a file_handler variable + if filter_type == "inlet" and hasattr(function_module, "file_handler"): + skip_files = function_module.file_handler + + # Apply valves to the function + if hasattr(function_module, "valves") and hasattr(function_module, "Valves"): + valves = Functions.get_function_valves_by_id(filter_id) + function_module.valves = function_module.Valves( + **(valves if valves else {}) + ) + + # Prepare handler function + handler = getattr(function_module, filter_type, None) + if not handler: + continue + + try: + # Prepare parameters + sig = inspect.signature(handler) + params = {"body": form_data} | { + k: v + for k, v in { + **extra_params, + "__id__": filter_id, + }.items() + if k in sig.parameters + } + + # Handle user parameters + if "__user__" in sig.parameters: + if hasattr(function_module, "UserValves"): + try: + params["__user__"]["valves"] = function_module.UserValves( + **Functions.get_user_valves_by_id_and_user_id( + filter_id, params["__user__"]["id"] + ) + ) + except Exception as e: + print(e) + + # Execute handler + if inspect.iscoroutinefunction(handler): + form_data = await handler(**params) + else: + form_data = handler(**params) + + except Exception as e: + print(f"Error in {filter_type} handler {filter_id}: {e}") + raise e + + # Handle file cleanup for inlet + if skip_files and "files" in form_data.get("metadata", {}): + del form_data["metadata"]["files"] + + return form_data, {} diff --git a/backend/open_webui/utils/images/comfyui.py b/backend/open_webui/utils/images/comfyui.py index 679fff9f64..b86c257591 100644 --- a/backend/open_webui/utils/images/comfyui.py +++ b/backend/open_webui/utils/images/comfyui.py @@ -161,7 +161,7 @@ async def comfyui_generate_image( seed = ( payload.seed if payload.seed - else random.randint(0, 18446744073709551614) + else random.randint(0, 1125899906842624) ) for node_id in node.node_ids: workflow[node_id]["inputs"][node.key] = seed diff --git a/backend/open_webui/utils/middleware.py 
b/backend/open_webui/utils/middleware.py index 06763483cb..4d70ddd65f 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -68,7 +68,11 @@ from open_webui.utils.misc import ( ) from open_webui.utils.tools import get_tools from open_webui.utils.plugin import load_function_module_by_id - +from open_webui.utils.filter import ( + get_sorted_filter_ids, + process_filter_functions, +) +from open_webui.utils.code_interpreter import execute_code_jupyter from open_webui.tasks import create_task @@ -91,99 +95,6 @@ log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MAIN"]) -async def chat_completion_filter_functions_handler(request, body, model, extra_params): - skip_files = None - - def get_filter_function_ids(model): - def get_priority(function_id): - function = Functions.get_function_by_id(function_id) - if function is not None and hasattr(function, "valves"): - # TODO: Fix FunctionModel - return (function.valves if function.valves else {}).get("priority", 0) - return 0 - - filter_ids = [ - function.id for function in Functions.get_global_filter_functions() - ] - if "info" in model and "meta" in model["info"]: - filter_ids.extend(model["info"]["meta"].get("filterIds", [])) - filter_ids = list(set(filter_ids)) - - enabled_filter_ids = [ - function.id - for function in Functions.get_functions_by_type("filter", active_only=True) - ] - - filter_ids = [ - filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids - ] - - filter_ids.sort(key=get_priority) - return filter_ids - - filter_ids = get_filter_function_ids(model) - for filter_id in filter_ids: - filter = Functions.get_function_by_id(filter_id) - if not filter: - continue - - if filter_id in request.app.state.FUNCTIONS: - function_module = request.app.state.FUNCTIONS[filter_id] - else: - function_module, _, _ = load_function_module_by_id(filter_id) - request.app.state.FUNCTIONS[filter_id] = function_module - - # Check if the function has a 
file_handler variable - if hasattr(function_module, "file_handler"): - skip_files = function_module.file_handler - - # Apply valves to the function - if hasattr(function_module, "valves") and hasattr(function_module, "Valves"): - valves = Functions.get_function_valves_by_id(filter_id) - function_module.valves = function_module.Valves( - **(valves if valves else {}) - ) - - if hasattr(function_module, "inlet"): - try: - inlet = function_module.inlet - - # Create a dictionary of parameters to be passed to the function - params = {"body": body} | { - k: v - for k, v in { - **extra_params, - "__model__": model, - "__id__": filter_id, - }.items() - if k in inspect.signature(inlet).parameters - } - - if "__user__" in params and hasattr(function_module, "UserValves"): - try: - params["__user__"]["valves"] = function_module.UserValves( - **Functions.get_user_valves_by_id_and_user_id( - filter_id, params["__user__"]["id"] - ) - ) - except Exception as e: - print(e) - - if inspect.iscoroutinefunction(inlet): - body = await inlet(**params) - else: - body = inlet(**params) - - except Exception as e: - print(f"Error: {e}") - raise e - - if skip_files and "files" in body.get("metadata", {}): - del body["metadata"]["files"] - - return body, {} - - async def chat_completion_tools_handler( request: Request, body: dict, user: UserModel, models, tools ) -> tuple[dict, dict]: @@ -572,13 +483,13 @@ async def chat_image_generation_handler( { "type": "status", "data": { - "description": f"An error occured while generating an image", + "description": f"An error occurred while generating an image", "done": True, }, } ) - system_message_content = "Unable to generate an image, tell the user that an error occured" + system_message_content = "Unable to generate an image, tell the user that an error occurred" if system_message_content: form_data["messages"] = add_or_update_system_message( @@ -706,11 +617,18 @@ async def process_chat_payload(request, form_data, metadata, user, model): }, 
"__metadata__": metadata, "__request__": request, + "__model__": model, } # Initialize events to store additional event to be sent to the client # Initialize contexts and citation - models = request.app.state.MODELS + if getattr(request.state, "direct", False) and hasattr(request.state, "model"): + models = { + request.state.model["id"]: request.state.model, + } + else: + models = request.app.state.MODELS + task_model_id = get_task_model_id( form_data["model"], request.app.state.config.TASK_MODEL, @@ -778,12 +696,21 @@ async def process_chat_payload(request, form_data, metadata, user, model): if "code_interpreter" in features and features["code_interpreter"]: form_data["messages"] = add_or_update_user_message( - DEFAULT_CODE_INTERPRETER_PROMPT, form_data["messages"] + ( + request.app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE + if request.app.state.config.CODE_INTERPRETER_PROMPT_TEMPLATE != "" + else DEFAULT_CODE_INTERPRETER_PROMPT + ), + form_data["messages"], ) try: - form_data, flags = await chat_completion_filter_functions_handler( - request, form_data, model, extra_params + form_data, flags = await process_filter_functions( + request=request, + filter_ids=get_sorted_filter_ids(model), + filter_type="inlet", + form_data=form_data, + extra_params=extra_params, ) except Exception as e: raise Exception(f"Error: {e}") @@ -851,17 +778,7 @@ async def process_chat_payload(request, form_data, metadata, user, model): if "document" in source: for doc_idx, doc_context in enumerate(source["document"]): - doc_metadata = source.get("metadata") - doc_source_id = None - - if doc_metadata: - doc_source_id = doc_metadata[doc_idx].get("source", source_id) - - if source_id: - context_string += f"{doc_source_id if doc_source_id is not None else source_id}{doc_context}\n" - else: - # If there is no source_id, then do not include the source_id tag - context_string += f"{doc_context}\n" + context_string += f"{doc_idx}{doc_context}\n" context_string = context_string.strip() prompt = 
get_last_user_message(form_data["messages"]) @@ -1122,6 +1039,20 @@ async def process_chat_response( }, ) + def split_content_and_whitespace(content): + content_stripped = content.rstrip() + original_whitespace = ( + content[len(content_stripped) :] + if len(content) > len(content_stripped) + else "" + ) + return content_stripped, original_whitespace + + def is_opening_code_block(content): + backtick_segments = content.split("```") + # Even number of segments means the last backticks are opening a new block + return len(backtick_segments) > 1 and len(backtick_segments) % 2 == 0 + # Handle as a background task async def post_response_handler(response, events): def serialize_content_blocks(content_blocks, raw=False): @@ -1188,6 +1119,19 @@ async def process_chat_response( output = block.get("output", None) lang = attributes.get("lang", "") + content_stripped, original_whitespace = ( + split_content_and_whitespace(content) + ) + if is_opening_code_block(content_stripped): + # Remove trailing backticks that would open a new block + content = ( + content_stripped.rstrip("`").rstrip() + + original_whitespace + ) + else: + # Keep content as is - either closing backticks or no backticks + content = content_stripped + original_whitespace + if output: output = html.escape(json.dumps(output)) @@ -1242,10 +1186,10 @@ async def process_chat_response( match.end() : ] # Content after opening tag - # Remove the start tag from the currently handling text block + # Remove the start tag and after from the currently handling text block content_blocks[-1]["content"] = content_blocks[-1][ "content" - ].replace(match.group(0), "") + ].replace(match.group(0) + after_tag, "") if before_tag: content_blocks[-1]["content"] = before_tag @@ -1702,26 +1646,70 @@ async def process_chat_response( content_blocks[-1]["type"] == "code_interpreter" and retries < MAX_RETRIES ): + await event_emitter( + { + "type": "chat:completion", + "data": { + "content": serialize_content_blocks(content_blocks), + 
}, + } + ) + retries += 1 log.debug(f"Attempt count: {retries}") output = "" try: if content_blocks[-1]["attributes"].get("type") == "code": - output = await event_caller( - { - "type": "execute:python", - "data": { - "id": str(uuid4()), - "code": content_blocks[-1]["content"], - }, + code = content_blocks[-1]["content"] + + if ( + request.app.state.config.CODE_INTERPRETER_ENGINE + == "pyodide" + ): + output = await event_caller( + { + "type": "execute:python", + "data": { + "id": str(uuid4()), + "code": code, + "session_id": metadata.get( + "session_id", None + ), + }, + } + ) + elif ( + request.app.state.config.CODE_INTERPRETER_ENGINE + == "jupyter" + ): + output = await execute_code_jupyter( + request.app.state.config.CODE_INTERPRETER_JUPYTER_URL, + code, + ( + request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_TOKEN + if request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH + == "token" + else None + ), + ( + request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH_PASSWORD + if request.app.state.config.CODE_INTERPRETER_JUPYTER_AUTH + == "password" + else None + ), + ) + else: + output = { + "stdout": "Code interpreter engine not configured." 
} - ) + + log.debug(f"Code interpreter output: {output}") if isinstance(output, dict): stdout = output.get("stdout", "") - if stdout: + if isinstance(stdout, str): stdoutLines = stdout.split("\n") for idx, line in enumerate(stdoutLines): if "data:image/png;base64" in line: @@ -1750,6 +1738,38 @@ async def process_chat_response( ) output["stdout"] = "\n".join(stdoutLines) + + result = output.get("result", "") + + if isinstance(result, str): + resultLines = result.split("\n") + for idx, line in enumerate(resultLines): + if "data:image/png;base64" in line: + id = str(uuid4()) + + # ensure the path exists + os.makedirs( + os.path.join(CACHE_DIR, "images"), + exist_ok=True, + ) + + image_path = os.path.join( + CACHE_DIR, + f"images/{id}.png", + ) + + with open(image_path, "wb") as f: + f.write( + base64.b64decode( + line.split(",")[1] + ) + ) + + resultLines[idx] = ( + f"![Output Image {idx}](/cache/images/{id}.png)" + ) + + output["result"] = "\n".join(resultLines) except Exception as e: output = str(e) @@ -1771,6 +1791,8 @@ async def process_chat_response( } ) + print(content_blocks, serialize_content_blocks(content_blocks)) + try: res = await generate_chat_completion( request, diff --git a/backend/open_webui/utils/misc.py b/backend/open_webui/utils/misc.py index b073939219..4eace24dc3 100644 --- a/backend/open_webui/utils/misc.py +++ b/backend/open_webui/utils/misc.py @@ -217,12 +217,19 @@ def openai_chat_chunk_message_template( def openai_chat_completion_message_template( - model: str, message: Optional[str] = None, usage: Optional[dict] = None + model: str, + message: Optional[str] = None, + tool_calls: Optional[list[dict]] = None, + usage: Optional[dict] = None, ) -> dict: template = openai_chat_message_template(model) template["object"] = "chat.completion" if message is not None: template["choices"][0]["message"] = {"content": message, "role": "assistant"} + + if tool_calls: + template["choices"][0]["tool_calls"] = tool_calls + 
template["choices"][0]["finish_reason"] = "stop" if usage: @@ -244,11 +251,12 @@ def get_gravatar_url(email): return f"https://www.gravatar.com/avatar/{hash_hex}?d=mp" -def calculate_sha256(file): +def calculate_sha256(file_path, chunk_size): + # Compute SHA-256 hash of a file efficiently in chunks sha256 = hashlib.sha256() - # Read the file in chunks to efficiently handle large files - for chunk in iter(lambda: file.read(8192), b""): - sha256.update(chunk) + with open(file_path, "rb") as f: + while chunk := f.read(chunk_size): + sha256.update(chunk) return sha256.hexdigest() diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 7c0c53c2d5..463f67adcc 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -1,6 +1,7 @@ import base64 import logging import mimetypes +import sys import uuid import aiohttp @@ -40,7 +41,11 @@ from open_webui.utils.misc import parse_duration from open_webui.utils.auth import get_password_hash, create_token from open_webui.utils.webhook import post_webhook +from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL + +logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL) log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["OAUTH"]) auth_manager_config = AppConfig() auth_manager_config.DEFAULT_USER_ROLE = DEFAULT_USER_ROLE @@ -72,12 +77,15 @@ class OAuthManager: def get_user_role(self, user, user_data): if user and Users.get_num_users() == 1: # If the user is the only user, assign the role "admin" - actually repairs role for single user on login + log.debug("Assigning the only user the admin role") return "admin" if not user and Users.get_num_users() == 0: # If there are no users, assign the role "admin", as the first user will be an admin + log.debug("Assigning the first user the admin role") return "admin" if auth_manager_config.ENABLE_OAUTH_ROLE_MANAGEMENT: + log.debug("Running OAUTH Role management") oauth_claim = 
auth_manager_config.OAUTH_ROLES_CLAIM oauth_allowed_roles = auth_manager_config.OAUTH_ALLOWED_ROLES oauth_admin_roles = auth_manager_config.OAUTH_ADMIN_ROLES @@ -93,17 +101,24 @@ class OAuthManager: claim_data = claim_data.get(nested_claim, {}) oauth_roles = claim_data if isinstance(claim_data, list) else None + log.debug(f"Oauth Roles claim: {oauth_claim}") + log.debug(f"User roles from oauth: {oauth_roles}") + log.debug(f"Accepted user roles: {oauth_allowed_roles}") + log.debug(f"Accepted admin roles: {oauth_admin_roles}") + # If any roles are found, check if they match the allowed or admin roles if oauth_roles: # If role management is enabled, and matching roles are provided, use the roles for allowed_role in oauth_allowed_roles: # If the user has any of the allowed roles, assign the role "user" if allowed_role in oauth_roles: + log.debug("Assigned user the user role") role = "user" break for admin_role in oauth_admin_roles: # If the user has any of the admin roles, assign the role "admin" if admin_role in oauth_roles: + log.debug("Assigned user the admin role") role = "admin" break else: @@ -117,16 +132,27 @@ class OAuthManager: return role def update_user_groups(self, user, user_data, default_permissions): + log.debug("Running OAUTH Group management") oauth_claim = auth_manager_config.OAUTH_GROUPS_CLAIM user_oauth_groups: list[str] = user_data.get(oauth_claim, list()) user_current_groups: list[GroupModel] = Groups.get_groups_by_member_id(user.id) all_available_groups: list[GroupModel] = Groups.get_groups() + log.debug(f"Oauth Groups claim: {oauth_claim}") + log.debug(f"User oauth groups: {user_oauth_groups}") + log.debug(f"User's current groups: {[g.name for g in user_current_groups]}") + log.debug( + f"All groups available in OpenWebUI: {[g.name for g in all_available_groups]}" + ) + # Remove groups that user is no longer a part of for group_model in user_current_groups: if group_model.name not in user_oauth_groups: # Remove group from user + log.debug( + 
f"Removing user from group {group_model.name} as it is no longer in their oauth groups" + ) user_ids = group_model.user_ids user_ids = [i for i in user_ids if i != user.id] @@ -152,6 +178,9 @@ class OAuthManager: gm.name == group_model.name for gm in user_current_groups ): # Add user to group + log.debug( + f"Adding user to group {group_model.name} as it was found in their oauth groups" + ) user_ids = group_model.user_ids user_ids.append(user.id) @@ -193,7 +222,7 @@ class OAuthManager: log.warning(f"OAuth callback error: {e}") raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED) user_data: UserInfo = token.get("userinfo") - if not user_data: + if not user_data or "email" not in user_data: user_data: UserInfo = await client.userinfo(token=token) if not user_data: log.warning(f"OAuth callback failed, user data is missing: {token}") @@ -261,15 +290,20 @@ class OAuthManager: } async with aiohttp.ClientSession() as session: async with session.get(picture_url, **get_kwargs) as resp: - picture = await resp.read() - base64_encoded_picture = base64.b64encode( - picture - ).decode("utf-8") - guessed_mime_type = mimetypes.guess_type(picture_url)[0] - if guessed_mime_type is None: - # assume JPG, browsers are tolerant enough of image formats - guessed_mime_type = "image/jpeg" - picture_url = f"data:{guessed_mime_type};base64,{base64_encoded_picture}" + if resp.ok: + picture = await resp.read() + base64_encoded_picture = base64.b64encode( + picture + ).decode("utf-8") + guessed_mime_type = mimetypes.guess_type( + picture_url + )[0] + if guessed_mime_type is None: + # assume JPG, browsers are tolerant enough of image formats + guessed_mime_type = "image/jpeg" + picture_url = f"data:{guessed_mime_type};base64,{base64_encoded_picture}" + else: + picture_url = "/user.png" except Exception as e: log.error( f"Error downloading profile image '{picture_url}': {e}" @@ -281,7 +315,8 @@ class OAuthManager: username_claim = auth_manager_config.OAUTH_USERNAME_CLAIM name = 
user_data.get(username_claim) - if not isinstance(user, str): + if not name: + log.warning("Username claim is missing, using email as name") name = email role = self.get_user_role(None, user_data) diff --git a/backend/open_webui/utils/payload.py b/backend/open_webui/utils/payload.py index b68b313de7..5eb040434b 100644 --- a/backend/open_webui/utils/payload.py +++ b/backend/open_webui/utils/payload.py @@ -14,6 +14,12 @@ def apply_model_system_prompt_to_body( if not system: return form_data + # Metadata (WebUI Usage) + if metadata: + variables = metadata.get("variables", {}) + if variables: + system = prompt_variables_template(system, variables) + # Legacy (API Usage) if user: template_params = { @@ -25,12 +31,6 @@ def apply_model_system_prompt_to_body( system = prompt_template(system, **template_params) - # Metadata (WebUI Usage) - if metadata: - variables = metadata.get("variables", {}) - if variables: - system = prompt_variables_template(system, variables) - form_data["messages"] = add_or_update_system_message( system, form_data.get("messages", []) ) diff --git a/backend/open_webui/utils/pdf_generator.py b/backend/open_webui/utils/pdf_generator.py index 1bb9f76b30..8b04dd81bc 100644 --- a/backend/open_webui/utils/pdf_generator.py +++ b/backend/open_webui/utils/pdf_generator.py @@ -2,6 +2,7 @@ from datetime import datetime from io import BytesIO from pathlib import Path from typing import Dict, Any, List +from html import escape from markdown import markdown @@ -41,13 +42,13 @@ class PDFGenerator: def _build_html_message(self, message: Dict[str, Any]) -> str: """Build HTML for a single message.""" - role = message.get("role", "user") - content = message.get("content", "") + role = escape(message.get("role", "user")) + content = escape(message.get("content", "")) timestamp = message.get("timestamp") - model = message.get("model") if role == "assistant" else "" + model = escape(message.get("model") if role == "assistant" else "") - date_str = 
self.format_timestamp(timestamp) if timestamp else "" + date_str = escape(self.format_timestamp(timestamp) if timestamp else "") # extends pymdownx extension to convert markdown to html. # - https://facelessuser.github.io/pymdown-extensions/usage_notes/ @@ -76,6 +77,7 @@ class PDFGenerator: def _generate_html_body(self) -> str: """Generate the full HTML body for the PDF.""" + escaped_title = escape(self.form_data.title) return f""" @@ -84,7 +86,7 @@ class PDFGenerator:
-

{self.form_data.title}

+

{escaped_title}

{self.messages_html}
diff --git a/backend/open_webui/utils/response.py b/backend/open_webui/utils/response.py index b16805bf35..f9979b4a27 100644 --- a/backend/open_webui/utils/response.py +++ b/backend/open_webui/utils/response.py @@ -6,9 +6,32 @@ from open_webui.utils.misc import ( ) +def convert_ollama_tool_call_to_openai(tool_calls: dict) -> dict: + openai_tool_calls = [] + for tool_call in tool_calls: + openai_tool_call = { + "index": tool_call.get("index", 0), + "id": tool_call.get("id", f"call_{str(uuid4())}"), + "type": "function", + "function": { + "name": tool_call.get("function", {}).get("name", ""), + "arguments": json.dumps( + tool_call.get("function", {}).get("arguments", {}) + ), + }, + } + openai_tool_calls.append(openai_tool_call) + return openai_tool_calls + + def convert_response_ollama_to_openai(ollama_response: dict) -> dict: model = ollama_response.get("model", "ollama") message_content = ollama_response.get("message", {}).get("content", "") + tool_calls = ollama_response.get("message", {}).get("tool_calls", None) + openai_tool_calls = None + + if tool_calls: + openai_tool_calls = convert_ollama_tool_call_to_openai(tool_calls) data = ollama_response usage = { @@ -51,7 +74,9 @@ def convert_response_ollama_to_openai(ollama_response: dict) -> dict: ), } - response = openai_chat_completion_message_template(model, message_content, usage) + response = openai_chat_completion_message_template( + model, message_content, openai_tool_calls, usage + ) return response @@ -65,18 +90,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response) openai_tool_calls = None if tool_calls: - openai_tool_calls = [] - for tool_call in tool_calls: - openai_tool_call = { - "index": tool_call.get("index", 0), - "id": tool_call.get("id", f"call_{str(uuid4())}"), - "type": "function", - "function": { - "name": tool_call.get("function", {}).get("name", ""), - "arguments": f"{tool_call.get('function', {}).get('arguments', {})}", - }, - } - 
openai_tool_calls.append(openai_tool_call) + openai_tool_calls = convert_ollama_tool_call_to_openai(tool_calls) done = data.get("done", False) diff --git a/backend/requirements.txt b/backend/requirements.txt index 14ad4b9cdf..3567924a84 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -32,6 +32,8 @@ boto3==1.35.53 argon2-cffi==23.1.0 APScheduler==3.10.4 +RestrictedPython==8.0 + # AI libraries openai anthropic @@ -45,7 +47,7 @@ fake-useragent==1.5.1 chromadb==0.6.2 pymilvus==2.5.0 qdrant-client~=1.12.0 -opensearch-py==2.7.1 +opensearch-py==2.8.0 transformers @@ -77,7 +79,7 @@ opencv-python-headless==4.11.0.86 rapidocr-onnxruntime==1.3.24 rank-bm25==0.2.2 -faster-whisper==1.0.3 +faster-whisper==1.1.1 PyJWT[crypto]==2.10.1 authlib==1.4.1 @@ -89,7 +91,7 @@ pytube==15.0.0 extract_msg pydub -duckduckgo-search~=7.3.0 +duckduckgo-search~=7.3.2 ## Google Drive google-api-python-client diff --git a/package-lock.json b/package-lock.json index e079e15364..710ee7fcec 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.5.10", + "version": "0.5.11", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.5.10", + "version": "0.5.11", "dependencies": { "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", @@ -42,6 +42,7 @@ "idb": "^7.1.1", "js-sha256": "^0.10.1", "katex": "^0.16.21", + "kokoro-js": "^1.1.1", "marked": "^9.1.0", "mermaid": "^10.9.3", "paneforge": "^0.0.6", @@ -62,7 +63,8 @@ "svelte-sonner": "^0.3.19", "tippy.js": "^6.3.7", "turndown": "^7.2.0", - "uuid": "^9.0.1" + "uuid": "^9.0.1", + "vite-plugin-static-copy": "^2.2.0" }, "devDependencies": { "@sveltejs/adapter-auto": "3.2.2", @@ -91,7 +93,7 @@ "tslib": "^2.4.1", "typescript": "^5.5.4", "vite": "^5.4.14", - "vitest": "^1.6.0" + "vitest": "^1.6.1" }, "engines": { "node": ">=18.13.0 <=22.x.x", @@ -1078,21 +1080,23 @@ } }, "node_modules/@huggingface/jinja": { - 
"version": "0.3.1", - "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.3.1.tgz", - "integrity": "sha512-SbcBWUKDQ76lzlVYOloscUk0SJjuL1LcbZsfQv/Bxxc7dwJMYuS+DAQ+HhVw6ZkTFXArejaX5HQRuCuleYwYdA==", + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.3.3.tgz", + "integrity": "sha512-vQQr2JyWvVFba3Lj9es4q9vCl1sAc74fdgnEMoX8qHrXtswap9ge9uO3ONDzQB0cQ0PUyaKY2N6HaVbTBvSXvw==", + "license": "MIT", "engines": { "node": ">=18" } }, "node_modules/@huggingface/transformers": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@huggingface/transformers/-/transformers-3.0.0.tgz", - "integrity": "sha512-OWIPnTijAw4DQ+IFHBOrej2SDdYyykYlTtpTLCEt5MZq/e9Cb65RS2YVhdGcgbaW/6JAL3i8ZA5UhDeWGm4iRQ==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@huggingface/transformers/-/transformers-3.3.3.tgz", + "integrity": "sha512-OcMubhBjW6u1xnp0zSt5SvCxdGHuhP2k+w2Vlm3i0vNcTJhJTZWxxYQmPBfcb7PX+Q6c43lGSzWD6tsJFwka4Q==", + "license": "Apache-2.0", "dependencies": { - "@huggingface/jinja": "^0.3.0", - "onnxruntime-node": "1.19.2", - "onnxruntime-web": "1.20.0-dev.20241016-2b8fc5529b", + "@huggingface/jinja": "^0.3.3", + "onnxruntime-node": "1.20.1", + "onnxruntime-web": "1.21.0-dev.20250206-d981b153d3", "sharp": "^0.33.5" } }, @@ -1546,6 +1550,7 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "license": "ISC", "dependencies": { "minipass": "^7.0.4" }, @@ -1558,6 +1563,7 @@ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", "dev": true, + "license": "MIT", "dependencies": { "@sinclair/typebox": "^0.27.8" }, @@ -1798,7 +1804,6 @@ "version": "2.1.5", "resolved": 
"https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -1811,7 +1816,6 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, "engines": { "node": ">= 8" } @@ -1820,7 +1824,6 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -1856,27 +1859,32 @@ "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/base64": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/codegen": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + "integrity": 
"sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "license": "BSD-3-Clause", "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -1885,27 +1893,32 @@ "node_modules/@protobufjs/float": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/inquire": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + "integrity": 
"sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/pool": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "license": "BSD-3-Clause" }, "node_modules/@pyscript/core": { "version": "0.4.32", @@ -2210,7 +2223,8 @@ "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@socket.io/component-emitter": { "version": "3.1.2", @@ -3146,13 +3160,14 @@ "integrity": "sha512-g7f0IkJdPW2xhY7H4iE72DAsIyfuwEFc6JWc2tYFwKDMWWAF699vGjrM348cwQuOXgHpe1gWFe+Eiyjx/ewvvw==" }, "node_modules/@vitest/expect": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.6.0.tgz", - "integrity": "sha512-ixEvFVQjycy/oNgHjqsL6AZCDduC+tflRluaHIzKIsdbzkLn2U/iBnVeJwB6HsIjQBdfMR8Z0tRxKUsvFJEeWQ==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.6.1.tgz", + "integrity": "sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/spy": "1.6.0", - 
"@vitest/utils": "1.6.0", + "@vitest/spy": "1.6.1", + "@vitest/utils": "1.6.1", "chai": "^4.3.10" }, "funding": { @@ -3160,12 +3175,13 @@ } }, "node_modules/@vitest/runner": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.6.0.tgz", - "integrity": "sha512-P4xgwPjwesuBiHisAVz/LSSZtDjOTPYZVmNAnpHHSR6ONrf8eCJOFRvUwdHn30F5M1fxhqtl7QZQUk2dprIXAg==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.6.1.tgz", + "integrity": "sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/utils": "1.6.0", + "@vitest/utils": "1.6.1", "p-limit": "^5.0.0", "pathe": "^1.1.1" }, @@ -3178,6 +3194,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^1.0.0" }, @@ -3189,10 +3206,11 @@ } }, "node_modules/@vitest/runner/node_modules/yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.1.1.tgz", + "integrity": "sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==", "dev": true, + "license": "MIT", "engines": { "node": ">=12.20" }, @@ -3201,10 +3219,11 @@ } }, "node_modules/@vitest/snapshot": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.6.0.tgz", - "integrity": "sha512-+Hx43f8Chus+DCmygqqfetcAZrDJwvTj0ymqjQq4CvmpKFSTVteEOBzCusu1x2tt4OJcvBflyHUE0DZSLgEMtQ==", + "version": "1.6.1", + "resolved": 
"https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.6.1.tgz", + "integrity": "sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ==", "dev": true, + "license": "MIT", "dependencies": { "magic-string": "^0.30.5", "pathe": "^1.1.1", @@ -3215,10 +3234,11 @@ } }, "node_modules/@vitest/spy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.6.0.tgz", - "integrity": "sha512-leUTap6B/cqi/bQkXUu6bQV5TZPx7pmMBKBQiI0rJA8c3pB56ZsaTbREnF7CJfmvAS4V2cXIBAh/3rVwrrCYgw==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.6.1.tgz", + "integrity": "sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw==", "dev": true, + "license": "MIT", "dependencies": { "tinyspy": "^2.2.0" }, @@ -3227,10 +3247,11 @@ } }, "node_modules/@vitest/utils": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.6.0.tgz", - "integrity": "sha512-21cPiuGMoMZwiOHa2i4LXkMkMkCGzA+MVFV70jRwHo95dL4x/ts5GZhML1QWuy7yfp3WzK3lRvZi3JnXTYqrBw==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.6.1.tgz", + "integrity": "sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g==", "dev": true, + "license": "MIT", "dependencies": { "diff-sequences": "^29.6.3", "estree-walker": "^3.0.3", @@ -3246,6 +3267,7 @@ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", "dev": true, + "license": "MIT", "dependencies": { "@types/estree": "^1.0.0" } @@ -3416,7 +3438,6 @@ "version": "3.1.3", "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "dev": true, "dependencies": { "normalize-path": 
"^3.0.0", "picomatch": "^2.0.4" @@ -3496,6 +3517,7 @@ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", "dev": true, + "license": "MIT", "engines": { "node": "*" } @@ -3644,7 +3666,6 @@ "version": "2.3.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "dev": true, "engines": { "node": ">=8" }, @@ -3720,7 +3741,6 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, "dependencies": { "fill-range": "^7.1.1" }, @@ -3895,6 +3915,7 @@ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -3972,10 +3993,11 @@ "dev": true }, "node_modules/chai": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", - "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.5.0.tgz", + "integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==", "dev": true, + "license": "MIT", "dependencies": { "assertion-error": "^1.1.0", "check-error": "^1.0.3", @@ -3983,7 +4005,7 @@ "get-func-name": "^2.0.2", "loupe": "^2.3.6", "pathval": "^1.1.1", - "type-detect": "^4.0.8" + "type-detect": "^4.1.0" }, "engines": { "node": ">=4" @@ -4019,6 +4041,7 @@ "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", "integrity": 
"sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", "dev": true, + "license": "MIT", "dependencies": { "get-func-name": "^2.0.2" }, @@ -4077,7 +4100,6 @@ "version": "3.6.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "dev": true, "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -4101,7 +4123,6 @@ "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, "dependencies": { "is-glob": "^4.0.1" }, @@ -4113,6 +4134,7 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "license": "BlueOak-1.0.0", "engines": { "node": ">=18" } @@ -5135,10 +5157,11 @@ } }, "node_modules/deep-eql": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", - "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.4.tgz", + "integrity": "sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==", "dev": true, + "license": "MIT", "dependencies": { "type-detect": "^4.0.0" }, @@ -5257,6 +5280,7 @@ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", "dev": true, + "license": "MIT", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -5899,7 +5923,6 @@ "version": "3.3.2", "resolved": 
"https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", - "dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -5915,7 +5938,6 @@ "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, "dependencies": { "is-glob": "^4.0.1" }, @@ -5939,7 +5961,6 @@ "version": "1.17.1", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", - "dev": true, "dependencies": { "reusify": "^1.0.4" } @@ -5998,7 +6019,6 @@ "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6037,9 +6057,10 @@ } }, "node_modules/flatbuffers": { - "version": "1.12.0", - "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-1.12.0.tgz", - "integrity": "sha512-c7CZADjRcl6j0PlvFy0ZqXQ67qSEZfrVPynmnL+2zPc+NtMvrF8Y0QceMo7QqnSPc7+uWjUIAbvCQ5WIKlMVdQ==" + "version": "25.1.24", + "resolved": "https://registry.npmjs.org/flatbuffers/-/flatbuffers-25.1.24.tgz", + "integrity": "sha512-Ni+KCqYquU30UEgGkrrwpbYtUcUmNuLFcQ5Xdy9DK7WUaji+AAov+Bf12FEYmu0eI15y31oD38utnBexe0cAYA==", + "license": "Apache-2.0" }, "node_modules/flatted": { "version": "3.3.1", @@ -6110,7 +6131,6 @@ "version": "11.2.0", "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", - "dev": true, "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": 
"^6.0.1", @@ -6238,6 +6258,7 @@ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", "dev": true, + "license": "MIT", "engines": { "node": "*" } @@ -6429,8 +6450,7 @@ "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, "node_modules/graphemer": { "version": "1.4.0", @@ -6441,7 +6461,8 @@ "node_modules/guid-typescript": { "version": "1.0.9", "resolved": "https://registry.npmjs.org/guid-typescript/-/guid-typescript-1.0.9.tgz", - "integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ==" + "integrity": "sha512-Y8T4vYhEfwJOTbouREvG+3XDsjr8E3kIr7uf+JZ0BYloFsttiHU0WfvANVsR7TxNUJa/WpCnw/Ino/p+DeBhBQ==", + "license": "ISC" }, "node_modules/gulp-sort": { "version": "2.0.0", @@ -6809,7 +6830,6 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, "dependencies": { "binary-extensions": "^2.0.0" }, @@ -6858,7 +6878,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -6875,7 +6894,6 @@ "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - 
"dev": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -6917,7 +6935,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "engines": { "node": ">=0.12.0" } @@ -7097,7 +7114,6 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, "dependencies": { "universalify": "^2.0.0" }, @@ -7172,6 +7188,16 @@ "integrity": "sha512-tBECoUqNFbyAY4RrbqsBQqDFpGXAEbdD5QKr8kACx3+rnArmuuR22nKQWKazvp07N9yjTyDZaw/20UIH8tL9DQ==", "dev": true }, + "node_modules/kokoro-js": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/kokoro-js/-/kokoro-js-1.1.1.tgz", + "integrity": "sha512-cyLO34iI8nBJXPnd3fI4fGeQGS+a6Uatg7eXNL6QS8TLSxaa30WD6Fj7/XoIZYaHg8q6d+TCrui/f74MTY2g1g==", + "license": "Apache-2.0", + "dependencies": { + "@huggingface/transformers": "^3.3.3", + "phonemizer": "^1.2.1" + } + }, "node_modules/layout-base": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", @@ -7455,15 +7481,17 @@ } }, "node_modules/long": { - "version": "5.2.3", - "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", - "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.4.tgz", + "integrity": "sha512-qtzLbJE8hq7VabR3mISmVGtoXP8KGc2Z/AT8OuqlYD7JTR3oqrgwdjnk07wpj1twXxYmgDXgoKVWUG/fReSzHg==", + "license": "Apache-2.0" }, "node_modules/loupe": { "version": "2.3.7", "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", "dev": true, + "license": 
"MIT", "dependencies": { "get-func-name": "^2.0.1" } @@ -7609,7 +7637,6 @@ "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, "engines": { "node": ">= 8" } @@ -8066,7 +8093,6 @@ "version": "4.0.8", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" @@ -8150,6 +8176,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.1.tgz", "integrity": "sha512-umcy022ILvb5/3Djuu8LWeqUa8D68JaBzlttKeMWen48SjabqS3iY5w/vzeMzMUNhLDifyhbOwKDSznB1vvrwg==", + "license": "MIT", "dependencies": { "minipass": "^7.0.4", "rimraf": "^5.0.5" @@ -8162,6 +8189,7 @@ "version": "10.4.5", "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", "dependencies": { "foreground-child": "^3.1.0", "jackspeak": "^3.1.2", @@ -8181,6 +8209,7 @@ "version": "3.4.3", "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", "dependencies": { "@isaacs/cliui": "^8.0.2" }, @@ -8195,6 +8224,7 @@ "version": "9.0.5", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -8209,6 +8239,7 @@ "version": "5.0.10", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", "integrity": 
"sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", + "license": "ISC", "dependencies": { "glob": "^10.3.7" }, @@ -8329,7 +8360,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -8433,42 +8463,46 @@ } }, "node_modules/onnxruntime-common": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.19.2.tgz", - "integrity": "sha512-a4R7wYEVFbZBlp0BfhpbFWqe4opCor3KM+5Wm22Az3NGDcQMiU2hfG/0MfnBs+1ZrlSGmlgWeMcXQkDk1UFb8Q==" + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.20.1.tgz", + "integrity": "sha512-YiU0s0IzYYC+gWvqD1HzLc46Du1sXpSiwzKb63PACIJr6LfL27VsXSXQvt68EzD3V0D5Bc0vyJTjmMxp0ylQiw==", + "license": "MIT" }, "node_modules/onnxruntime-node": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.19.2.tgz", - "integrity": "sha512-9eHMP/HKbbeUcqte1JYzaaRC8JPn7ojWeCeoyShO86TOR97OCyIyAIOGX3V95ErjslVhJRXY8Em/caIUc0hm1Q==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.20.1.tgz", + "integrity": "sha512-di/I4HDXRw+FLgq+TyHmQEDd3cEp9iFFZm0r4uJ1Wd7b/WE1VXtKWo8yemex347c6GNF/3Pv86ZfPhIWxORr0w==", "hasInstallScript": true, + "license": "MIT", "os": [ "win32", "darwin", "linux" ], "dependencies": { - "onnxruntime-common": "1.19.2", + "onnxruntime-common": "1.20.1", "tar": "^7.0.1" } }, "node_modules/onnxruntime-web": { - "version": "1.20.0-dev.20241016-2b8fc5529b", - "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.20.0-dev.20241016-2b8fc5529b.tgz", - "integrity": "sha512-1XovqtgqeEFtupuyzdDQo7Tqj4GRyNHzOoXjapCEo4rfH3JrXok5VtqucWfRXHPsOI5qoNxMQ9VE+drDIp6woQ==", + "version": 
"1.21.0-dev.20250206-d981b153d3", + "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.21.0-dev.20250206-d981b153d3.tgz", + "integrity": "sha512-esDVQdRic6J44VBMFLumYvcGfioMh80ceLmzF1yheJyuLKq/Th8VT2aj42XWQst+2bcWnAhw4IKmRQaqzU8ugg==", + "license": "MIT", "dependencies": { - "flatbuffers": "^1.12.0", + "flatbuffers": "^25.1.24", "guid-typescript": "^1.0.9", "long": "^5.2.3", - "onnxruntime-common": "1.20.0-dev.20241016-2b8fc5529b", + "onnxruntime-common": "1.21.0-dev.20250206-d981b153d3", "platform": "^1.3.6", "protobufjs": "^7.2.4" } }, "node_modules/onnxruntime-web/node_modules/onnxruntime-common": { - "version": "1.20.0-dev.20241016-2b8fc5529b", - "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.20.0-dev.20241016-2b8fc5529b.tgz", - "integrity": "sha512-KZK8b6zCYGZFjd4ANze0pqBnqnFTS3GIVeclQpa2qseDpXrCQJfkWBixRcrZShNhm3LpFOZ8qJYFC5/qsJK9WQ==" + "version": "1.21.0-dev.20250206-d981b153d3", + "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.21.0-dev.20250206-d981b153d3.tgz", + "integrity": "sha512-TwaE51xV9q2y8pM61q73rbywJnusw9ivTEHAJ39GVWNZqxCoDBpe/tQkh/w9S+o/g+zS7YeeL0I/2mEWd+dgyA==", + "license": "MIT" }, "node_modules/optionator": { "version": "0.9.3", @@ -8546,7 +8580,8 @@ "node_modules/package-json-from-dist": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==" + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0" }, "node_modules/paneforge": { "version": "0.0.6", @@ -8686,6 +8721,7 @@ "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", "dev": true, + "license": "MIT", 
"engines": { "node": "*" } @@ -8728,6 +8764,12 @@ "@types/estree": "*" } }, + "node_modules/phonemizer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/phonemizer/-/phonemizer-1.2.1.tgz", + "integrity": "sha512-v0KJ4mi2T4Q7eJQ0W15Xd4G9k4kICSXE8bpDeJ8jisL4RyJhNWsweKTOi88QXFc4r4LZlz5jVL5lCHhkpdT71A==", + "license": "Apache-2.0" + }, "node_modules/picocolors": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz", @@ -8781,7 +8823,8 @@ "node_modules/platform": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/platform/-/platform-1.3.6.tgz", - "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==" + "integrity": "sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==", + "license": "MIT" }, "node_modules/polyscript": { "version": "0.12.8", @@ -9064,6 +9107,7 @@ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dev": true, + "license": "MIT", "dependencies": { "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", @@ -9078,6 +9122,7 @@ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -9314,6 +9359,7 @@ "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.4.0.tgz", "integrity": "sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==", "hasInstallScript": true, + "license": "BSD-3-Clause", "dependencies": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -9413,7 +9459,6 @@ "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", 
"integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, "funding": [ { "type": "github", @@ -9504,7 +9549,8 @@ "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/read-cache": { "version": "1.0.0", @@ -9534,7 +9580,6 @@ "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, "dependencies": { "picomatch": "^2.2.1" }, @@ -9637,7 +9682,6 @@ "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true, "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -9763,7 +9807,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, "funding": [ { "type": "github", @@ -11131,6 +11174,7 @@ "version": "7.4.3", "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "license": "ISC", "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", @@ -11147,6 +11191,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "license": "MIT", "bin": { "mkdirp": "dist/cjs/src/bin.js" }, @@ -11247,6 +11292,7 @@ "resolved": 
"https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", "dev": true, + "license": "MIT", "engines": { "node": ">=14.0.0" } @@ -11277,7 +11323,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -11406,10 +11451,11 @@ "integrity": "sha512-fLIydlJy7IG9XL4wjRwEcKhxx/ekLXiWiMvcGo01cOMF+TN+5ZqajM1mRNRz2bNNi1bzou2yofhjZEQi7kgl9A==" }, "node_modules/type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } @@ -11484,7 +11530,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, "engines": { "node": ">= 10.0.0" } @@ -11749,10 +11794,11 @@ } }, "node_modules/vite-node": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.6.0.tgz", - "integrity": "sha512-de6HJgzC+TFzOu0NTC4RAIsyf/DY/ibWDYQUcuEA84EMHhcefTUGkjFHKKEJhQN4A+6I0u++kr3l36ZF2d7XRw==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.6.1.tgz", + "integrity": "sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA==", "dev": true, + "license": 
"MIT", "dependencies": { "cac": "^6.7.14", "debug": "^4.3.4", @@ -11770,6 +11816,24 @@ "url": "https://opencollective.com/vitest" } }, + "node_modules/vite-plugin-static-copy": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/vite-plugin-static-copy/-/vite-plugin-static-copy-2.2.0.tgz", + "integrity": "sha512-ytMrKdR9iWEYHbUxs6x53m+MRl4SJsOSoMu1U1+Pfg0DjPeMlsRVx3RR5jvoonineDquIue83Oq69JvNsFSU5w==", + "license": "MIT", + "dependencies": { + "chokidar": "^3.5.3", + "fast-glob": "^3.2.11", + "fs-extra": "^11.1.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "vite": "^5.0.0 || ^6.0.0" + } + }, "node_modules/vite/node_modules/@esbuild/aix-ppc64": { "version": "0.21.5", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", @@ -12166,16 +12230,17 @@ } }, "node_modules/vitest": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.6.0.tgz", - "integrity": "sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.6.1.tgz", + "integrity": "sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==", "dev": true, + "license": "MIT", "dependencies": { - "@vitest/expect": "1.6.0", - "@vitest/runner": "1.6.0", - "@vitest/snapshot": "1.6.0", - "@vitest/spy": "1.6.0", - "@vitest/utils": "1.6.0", + "@vitest/expect": "1.6.1", + "@vitest/runner": "1.6.1", + "@vitest/snapshot": "1.6.1", + "@vitest/spy": "1.6.1", + "@vitest/utils": "1.6.1", "acorn-walk": "^8.3.2", "chai": "^4.3.10", "debug": "^4.3.4", @@ -12189,7 +12254,7 @@ "tinybench": "^2.5.1", "tinypool": "^0.8.3", "vite": "^5.0.0", - "vite-node": "1.6.0", + "vite-node": "1.6.1", "why-is-node-running": "^2.2.2" }, "bin": { @@ -12204,8 +12269,8 @@ "peerDependencies": { "@edge-runtime/vm": "*", "@types/node": "^18.0.0 || >=20.0.0", - 
"@vitest/browser": "1.6.0", - "@vitest/ui": "1.6.0", + "@vitest/browser": "1.6.1", + "@vitest/ui": "1.6.1", "happy-dom": "*", "jsdom": "*" }, @@ -12567,6 +12632,7 @@ "version": "5.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "license": "BlueOak-1.0.0", "engines": { "node": ">=18" } diff --git a/package.json b/package.json index 02228ee8bc..9a481f0577 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.5.10", + "version": "0.5.11", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", @@ -47,7 +47,7 @@ "tslib": "^2.4.1", "typescript": "^5.5.4", "vite": "^5.4.14", - "vitest": "^1.6.0" + "vitest": "^1.6.1" }, "type": "module", "dependencies": { @@ -85,6 +85,7 @@ "idb": "^7.1.1", "js-sha256": "^0.10.1", "katex": "^0.16.21", + "kokoro-js": "^1.1.1", "marked": "^9.1.0", "mermaid": "^10.9.3", "paneforge": "^0.0.6", @@ -105,7 +106,8 @@ "svelte-sonner": "^0.3.19", "tippy.js": "^6.3.7", "turndown": "^7.2.0", - "uuid": "^9.0.1" + "uuid": "^9.0.1", + "vite-plugin-static-copy": "^2.2.0" }, "engines": { "node": ">=18.13.0 <=22.x.x", diff --git a/pyproject.toml b/pyproject.toml index f121089e8f..f4261ba82a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,9 @@ dependencies = [ "argon2-cffi==23.1.0", "APScheduler==3.10.4", + + "RestrictedPython==8.0", + "openai", "anthropic", "google-generativeai==0.7.2", @@ -52,7 +55,7 @@ dependencies = [ "chromadb==0.6.2", "pymilvus==2.5.0", "qdrant-client~=1.12.0", - "opensearch-py==2.7.1", + "opensearch-py==2.8.0", "transformers", "sentence-transformers==3.3.1", @@ -82,7 +85,7 @@ dependencies = [ "rapidocr-onnxruntime==1.3.24", "rank-bm25==0.2.2", - "faster-whisper==1.0.3", + "faster-whisper==1.1.1", "PyJWT[crypto]==2.10.1", "authlib==1.4.1", @@ -94,7 +97,7 @@ dependencies = [ "extract_msg", "pydub", - 
"duckduckgo-search~=7.3.0", + "duckduckgo-search~=7.3.2", "google-api-python-client", "google-auth-httplib2", diff --git a/src/lib/apis/configs/index.ts b/src/lib/apis/configs/index.ts index e9faf346bc..d7f02564ce 100644 --- a/src/lib/apis/configs/index.ts +++ b/src/lib/apis/configs/index.ts @@ -58,6 +58,120 @@ export const exportConfig = async (token: string) => { return res; }; +export const getDirectConnectionsConfig = async (token: string) => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/configs/direct_connections`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}` + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.log(err); + error = err.detail; + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + +export const setDirectConnectionsConfig = async (token: string, config: object) => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/configs/direct_connections`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}` + }, + body: JSON.stringify({ + ...config + }) + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.log(err); + error = err.detail; + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + +export const getCodeInterpreterConfig = async (token: string) => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/configs/code_interpreter`, { + method: 'GET', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}` + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.log(err); + error = err.detail; + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + +export const 
setCodeInterpreterConfig = async (token: string, config: object) => { + let error = null; + + const res = await fetch(`${WEBUI_API_BASE_URL}/configs/code_interpreter`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${token}` + }, + body: JSON.stringify({ + ...config + }) + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.log(err); + error = err.detail; + return null; + }); + + if (error) { + throw error; + } + + return res; +}; + export const getModelsConfig = async (token: string) => { let error = null; diff --git a/src/lib/apis/index.ts b/src/lib/apis/index.ts index c7fd78819c..3fb4a5d01b 100644 --- a/src/lib/apis/index.ts +++ b/src/lib/apis/index.ts @@ -1,6 +1,11 @@ import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants'; +import { getOpenAIModelsDirect } from './openai'; -export const getModels = async (token: string = '', base: boolean = false) => { +export const getModels = async ( + token: string = '', + connections: object | null = null, + base: boolean = false +) => { let error = null; const res = await fetch(`${WEBUI_BASE_URL}/api/models${base ? '/base' : ''}`, { method: 'GET', @@ -25,6 +30,111 @@ export const getModels = async (token: string = '', base: boolean = false) => { } let models = res?.data ?? []; + + if (connections && !base) { + let localModels = []; + + if (connections) { + const OPENAI_API_BASE_URLS = connections.OPENAI_API_BASE_URLS; + const OPENAI_API_KEYS = connections.OPENAI_API_KEYS; + const OPENAI_API_CONFIGS = connections.OPENAI_API_CONFIGS; + + const requests = []; + for (const idx in OPENAI_API_BASE_URLS) { + const url = OPENAI_API_BASE_URLS[idx]; + + if (idx.toString() in OPENAI_API_CONFIGS) { + const apiConfig = OPENAI_API_CONFIGS[idx.toString()] ?? {}; + + const enable = apiConfig?.enable ?? true; + const modelIds = apiConfig?.model_ids ?? 
[]; + + if (enable) { + if (modelIds.length > 0) { + const modelList = { + object: 'list', + data: modelIds.map((modelId) => ({ + id: modelId, + name: modelId, + owned_by: 'openai', + openai: { id: modelId }, + urlIdx: idx + })) + }; + + requests.push( + (async () => { + return modelList; + })() + ); + } else { + requests.push( + (async () => { + return await getOpenAIModelsDirect(url, OPENAI_API_KEYS[idx]) + .then((res) => { + return res; + }) + .catch((err) => { + return { + object: 'list', + data: [], + urlIdx: idx + }; + }); + })() + ); + } + } else { + requests.push( + (async () => { + return { + object: 'list', + data: [], + urlIdx: idx + }; + })() + ); + } + } + } + + const responses = await Promise.all(requests); + + for (const idx in responses) { + const response = responses[idx]; + const apiConfig = OPENAI_API_CONFIGS[idx.toString()] ?? {}; + + let models = Array.isArray(response) ? response : (response?.data ?? []); + models = models.map((model) => ({ ...model, openai: { id: model.id }, urlIdx: idx })); + + const prefixId = apiConfig.prefix_id; + if (prefixId) { + for (const model of models) { + model.id = `${prefixId}.${model.id}`; + } + } + + localModels = localModels.concat(models); + } + } + + models = models.concat( + localModels.map((model) => ({ + ...model, + name: model?.name ?? 
model?.id, + direct: true + })) + ); + + // Remove duplicates + const modelsMap = {}; + for (const model of models) { + modelsMap[model.id] = model; + } + + models = Object.values(modelsMap); + } + return models; }; diff --git a/src/lib/apis/openai/index.ts b/src/lib/apis/openai/index.ts index a801bcdbbf..bab2d6e36a 100644 --- a/src/lib/apis/openai/index.ts +++ b/src/lib/apis/openai/index.ts @@ -208,6 +208,33 @@ export const updateOpenAIKeys = async (token: string = '', keys: string[]) => { return res.OPENAI_API_KEYS; }; +export const getOpenAIModelsDirect = async (url: string, key: string) => { + let error = null; + + const res = await fetch(`${url}/models`, { + method: 'GET', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + ...(key && { authorization: `Bearer ${key}` }) + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + error = `OpenAI: ${err?.error?.message ?? 'Network Problem'}`; + return []; + }); + + if (error) { + throw error; + } + + return res; +}; + export const getOpenAIModels = async (token: string, urlIdx?: number) => { let error = null; @@ -241,33 +268,62 @@ export const getOpenAIModels = async (token: string, urlIdx?: number) => { export const verifyOpenAIConnection = async ( token: string = '', url: string = 'https://api.openai.com/v1', - key: string = '' + key: string = '', + direct: boolean = false ) => { + if (!url) { + throw 'OpenAI: URL is required'; + } + let error = null; + let res = null; - const res = await fetch(`${OPENAI_API_BASE_URL}/verify`, { - method: 'POST', - headers: { - Accept: 'application/json', - Authorization: `Bearer ${token}`, - 'Content-Type': 'application/json' - }, - body: JSON.stringify({ - url, - key + if (direct) { + res = await fetch(`${url}/models`, { + method: 'GET', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${key}`, + 'Content-Type': 'application/json' + } }) - }) - .then(async (res) => 
{ - if (!res.ok) throw await res.json(); - return res.json(); - }) - .catch((err) => { - error = `OpenAI: ${err?.error?.message ?? 'Network Problem'}`; - return []; - }); + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + error = `OpenAI: ${err?.error?.message ?? 'Network Problem'}`; + return []; + }); - if (error) { - throw error; + if (error) { + throw error; + } + } else { + res = await fetch(`${OPENAI_API_BASE_URL}/verify`, { + method: 'POST', + headers: { + Accept: 'application/json', + Authorization: `Bearer ${token}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + url, + key + }) + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + error = `OpenAI: ${err?.error?.message ?? 'Network Problem'}`; + return []; + }); + + if (error) { + throw error; + } } return res; diff --git a/src/lib/components/admin/Settings/Connections/AddConnectionModal.svelte b/src/lib/components/AddConnectionModal.svelte similarity index 98% rename from src/lib/components/admin/Settings/Connections/AddConnectionModal.svelte rename to src/lib/components/AddConnectionModal.svelte index a8726a5466..95074c2581 100644 --- a/src/lib/components/admin/Settings/Connections/AddConnectionModal.svelte +++ b/src/lib/components/AddConnectionModal.svelte @@ -20,7 +20,9 @@ export let show = false; export let edit = false; + export let ollama = false; + export let direct = false; export let connection = null; @@ -46,9 +48,11 @@ }; const verifyOpenAIHandler = async () => { - const res = await verifyOpenAIConnection(localStorage.token, url, key).catch((error) => { - toast.error(`${error}`); - }); + const res = await verifyOpenAIConnection(localStorage.token, url, key, direct).catch( + (error) => { + toast.error(`${error}`); + } + ); if (res) { toast.success($i18n.t('Server connection verified')); diff --git a/src/lib/components/admin/Evaluations/Feedbacks.svelte 
b/src/lib/components/admin/Evaluations/Feedbacks.svelte index e43081302c..e73adb027b 100644 --- a/src/lib/components/admin/Evaluations/Feedbacks.svelte +++ b/src/lib/components/admin/Evaluations/Feedbacks.svelte @@ -65,7 +65,7 @@ }; const shareHandler = async () => { - toast.success($i18n.t('Redirecting you to OpenWebUI Community')); + toast.success($i18n.t('Redirecting you to Open WebUI Community')); // remove snapshot from feedbacks const feedbacksToShare = feedbacks.map((f) => { @@ -266,7 +266,7 @@ }} >
- {$i18n.t('Share to OpenWebUI Community')} + {$i18n.t('Share to Open WebUI Community')}
diff --git a/src/lib/components/admin/Evaluations/Leaderboard.svelte b/src/lib/components/admin/Evaluations/Leaderboard.svelte index 59f6df916a..07e19e9792 100644 --- a/src/lib/components/admin/Evaluations/Leaderboard.svelte +++ b/src/lib/components/admin/Evaluations/Leaderboard.svelte @@ -1,6 +1,8 @@ + +
{ + await submitHandler(); + saveHandler(); + }} +> +
+ {#if config} +
+
+ {$i18n.t('Code Interpreter')} +
+ +
+
+
+ {$i18n.t('Enable Code Interpreter')} +
+ + +
+
+ +
+
{$i18n.t('Code Interpreter Engine')}
+
+ +
+
+ + {#if config.CODE_INTERPRETER_ENGINE === 'jupyter'} +
+
+ {$i18n.t('Jupyter URL')} +
+ +
+
+ +
+
+
+ +
+
+ {$i18n.t('Jupyter Auth')} +
+ +
+ +
+
+ + {#if config.CODE_INTERPRETER_JUPYTER_AUTH} +
+
+ {#if config.CODE_INTERPRETER_JUPYTER_AUTH === 'password'} + + {:else} + + {/if} +
+
+ {/if} + {/if} +
+ +
+ +
+
+
+ {$i18n.t('Code Interpreter Prompt Template')} +
+ + +