diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index a1ea5c8e20..1a1f0d1f4f 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -11,7 +11,7 @@ body:
## Important Notes
- - **Before submitting a bug report**: Please check the [Issues](https://github.com/open-webui/open-webui/issues) or [Discussions](https://github.com/open-webui/open-webui/discussions) sections to see if a similar issue has already been reported. If unsure, start a discussion first, as this helps us efficiently focus on improving the project.
+ - **Before submitting a bug report**: Please check the [Issues](https://github.com/open-webui/open-webui/issues) and [Discussions](https://github.com/open-webui/open-webui/discussions) sections to see if a similar issue has already been reported. If unsure, start a discussion first, as this helps us efficiently focus on improving the project. Duplicates may be closed without notice. **Please search for existing issues and discussions.**
- **Respectful collaboration**: Open WebUI is a volunteer-driven project with a single maintainer and contributors who also have full-time jobs. Please be constructive and respectful in your communication.
@@ -25,7 +25,9 @@ body:
label: Check Existing Issues
description: Confirm that you’ve checked for existing reports before submitting a new one.
options:
- - label: I have searched the existing issues and discussions.
+ - label: I have searched for any existing and/or related issues.
+ required: true
+ - label: I have searched for any existing and/or related discussions.
required: true
- label: I am using the latest version of Open WebUI.
required: true
@@ -47,7 +49,7 @@ body:
id: open-webui-version
attributes:
label: Open WebUI Version
- description: Specify the version (e.g., v0.3.11)
+ description: Specify the version (e.g., v0.6.26)
validations:
required: true
@@ -63,7 +65,7 @@ body:
id: operating-system
attributes:
label: Operating System
- description: Specify the OS (e.g., Windows 10, macOS Sonoma, Ubuntu 22.04)
+ description: Specify the OS (e.g., Windows 10, macOS Sonoma, Ubuntu 22.04, Debian 12)
validations:
required: true
@@ -126,6 +128,7 @@ body:
description: |
Please provide a **very detailed, step-by-step guide** to reproduce the issue. Your instructions should be so clear and precise that anyone can follow them without guesswork. Include every relevant detail—settings, configuration options, exact commands used, values entered, and any prerequisites or environment variables.
**If full reproduction steps and all relevant settings are not provided, your issue may not be addressed.**
+ **If your steps to reproduce are incomplete, lacking detail, or not reproducible, your issue cannot be addressed.**
placeholder: |
Example (include every detail):
@@ -163,5 +166,5 @@ body:
attributes:
value: |
## Note
- If the bug report is incomplete or does not follow instructions, it may not be addressed. Ensure that you've followed all the **README.md** and **troubleshooting.md** guidelines, and provide all necessary information for us to reproduce the issue.
+ **If the bug report is incomplete, does not follow the instructions, or is lacking details, it may not be addressed.** Ensure that you've followed all the **README.md** and **troubleshooting.md** guidelines, and provide all necessary information for us to reproduce the issue.
Thank you for contributing to Open WebUI!
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index ed93957ea4..1c83fd305b 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -12,12 +12,6 @@ updates:
interval: monthly
target-branch: 'dev'
- - package-ecosystem: npm
- directory: '/'
- schedule:
- interval: monthly
- target-branch: 'dev'
-
- package-ecosystem: 'github-actions'
directory: '/'
schedule:
diff --git a/.github/workflows/build-release.yml b/.github/workflows/build-release.yml
index 443d904199..7d5e30e23e 100644
--- a/.github/workflows/build-release.yml
+++ b/.github/workflows/build-release.yml
@@ -11,7 +11,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Check for changes in package.json
run: |
diff --git a/.github/workflows/deploy-to-hf-spaces.yml b/.github/workflows/deploy-to-hf-spaces.yml
index 7fc66acf5c..a30046af89 100644
--- a/.github/workflows/deploy-to-hf-spaces.yml
+++ b/.github/workflows/deploy-to-hf-spaces.yml
@@ -27,7 +27,7 @@ jobs:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
lfs: true
diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
index 821ffb7206..a8f9266e9d 100644
--- a/.github/workflows/docker-build.yaml
+++ b/.github/workflows/docker-build.yaml
@@ -43,7 +43,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -142,7 +142,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -244,7 +244,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -347,7 +347,7 @@ jobs:
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -419,6 +419,108 @@ jobs:
if-no-files-found: error
retention-days: 1
+ build-slim-image:
+ runs-on: ${{ matrix.runner }}
+ permissions:
+ contents: read
+ packages: write
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - platform: linux/amd64
+ runner: ubuntu-latest
+ - platform: linux/arm64
+ runner: ubuntu-24.04-arm
+
+ steps:
+ # GitHub Packages requires the entire repository name to be in lowercase
+ # although the repository owner has a lowercase username, this prevents some people from running actions after forking
+ - name: Set repository and image name to lowercase
+ run: |
+ echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
+ echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
+ env:
+ IMAGE_NAME: '${{ github.repository }}'
+
+ - name: Prepare
+ run: |
+ platform=${{ matrix.platform }}
+ echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
+
+ - name: Checkout repository
+ uses: actions/checkout@v5
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to the Container registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract metadata for Docker images (slim tag)
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.FULL_IMAGE_NAME }}
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=sha,prefix=git-
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=slim
+ flavor: |
+ latest=${{ github.ref == 'refs/heads/main' }}
+ suffix=-slim,onlatest=true
+
+ - name: Extract metadata for Docker cache
+ id: cache-meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.FULL_IMAGE_NAME }}
+ tags: |
+ type=ref,event=branch
+ ${{ github.ref_type == 'tag' && 'type=raw,value=main' || '' }}
+ flavor: |
+ prefix=cache-slim-${{ matrix.platform }}-
+ latest=false
+
+ - name: Build Docker image (slim)
+ uses: docker/build-push-action@v5
+ id: build
+ with:
+ context: .
+ push: true
+ platforms: ${{ matrix.platform }}
+ labels: ${{ steps.meta.outputs.labels }}
+ outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
+ cache-from: type=registry,ref=${{ steps.cache-meta.outputs.tags }}
+ cache-to: type=registry,ref=${{ steps.cache-meta.outputs.tags }},mode=max
+ build-args: |
+ BUILD_HASH=${{ github.sha }}
+ USE_SLIM=true
+
+ - name: Export digest
+ run: |
+ mkdir -p /tmp/digests
+ digest="${{ steps.build.outputs.digest }}"
+ touch "/tmp/digests/${digest#sha256:}"
+
+ - name: Upload digest
+ uses: actions/upload-artifact@v4
+ with:
+ name: digests-slim-${{ env.PLATFORM_PAIR }}
+ path: /tmp/digests/*
+ if-no-files-found: error
+ retention-days: 1
+
merge-main-images:
runs-on: ubuntu-latest
needs: [build-main-image]
@@ -433,7 +535,7 @@ jobs:
IMAGE_NAME: '${{ github.repository }}'
- name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
pattern: digests-main-*
path: /tmp/digests
@@ -487,7 +589,7 @@ jobs:
IMAGE_NAME: '${{ github.repository }}'
- name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
pattern: digests-cuda-*
path: /tmp/digests
@@ -543,7 +645,7 @@ jobs:
IMAGE_NAME: '${{ github.repository }}'
- name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
pattern: digests-cuda126-*
path: /tmp/digests
@@ -599,7 +701,7 @@ jobs:
IMAGE_NAME: '${{ github.repository }}'
- name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
pattern: digests-ollama-*
path: /tmp/digests
@@ -640,3 +742,59 @@ jobs:
- name: Inspect image
run: |
docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}
+
+ merge-slim-images:
+ runs-on: ubuntu-latest
+ needs: [build-slim-image]
+ steps:
+ # GitHub Packages requires the entire repository name to be in lowercase
+ # although the repository owner has a lowercase username, this prevents some people from running actions after forking
+ - name: Set repository and image name to lowercase
+ run: |
+ echo "IMAGE_NAME=${IMAGE_NAME,,}" >>${GITHUB_ENV}
+ echo "FULL_IMAGE_NAME=ghcr.io/${IMAGE_NAME,,}" >>${GITHUB_ENV}
+ env:
+ IMAGE_NAME: '${{ github.repository }}'
+
+ - name: Download digests
+ uses: actions/download-artifact@v5
+ with:
+ pattern: digests-slim-*
+ path: /tmp/digests
+ merge-multiple: true
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to the Container registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract metadata for Docker images (default slim tag)
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.FULL_IMAGE_NAME }}
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=sha,prefix=git-
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=slim
+ flavor: |
+ latest=${{ github.ref == 'refs/heads/main' }}
+ suffix=-slim,onlatest=true
+
+ - name: Create manifest list and push
+ working-directory: /tmp/digests
+ run: |
+ docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
+ $(printf '${{ env.FULL_IMAGE_NAME }}@sha256:%s ' *)
+
+ - name: Inspect image
+ run: |
+ docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}
diff --git a/.github/workflows/format-backend.yaml b/.github/workflows/format-backend.yaml
index 1bcdd92c1d..56074a84f4 100644
--- a/.github/workflows/format-backend.yaml
+++ b/.github/workflows/format-backend.yaml
@@ -30,7 +30,7 @@ jobs:
- 3.12.x
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v5
diff --git a/.github/workflows/format-build-frontend.yaml b/.github/workflows/format-build-frontend.yaml
index 15dc53cc63..df961ca3f5 100644
--- a/.github/workflows/format-build-frontend.yaml
+++ b/.github/workflows/format-build-frontend.yaml
@@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Node.js
uses: actions/setup-node@v4
@@ -51,7 +51,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Node.js
uses: actions/setup-node@v4
diff --git a/.github/workflows/release-pypi.yml b/.github/workflows/release-pypi.yml
index fd1adab3a9..c4ae97422d 100644
--- a/.github/workflows/release-pypi.yml
+++ b/.github/workflows/release-pypi.yml
@@ -16,7 +16,7 @@ jobs:
id-token: write
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Install Git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4f7619c7c1..2af109cb38 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,102 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [0.6.27] - 2025-09-09
+
+### Added
+
+- 📁 Emoji folder icons were added, allowing users to personalize workspace organization with visual cues, including improved chevron display. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/1588f42fe777ad5d807e3f2fc8dbbc47a8db87c0), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/b70c0f36c0f5bbfc2a767429984d6fba1a7bb26c), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/11dea8795bfce42aa5d8d58ef316ded05173bd87), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/c0a47169fa059154d5f5a9ea6b94f9a66d82f255)
+- 📁 The 'Search Collection' input field now dynamically displays the total number of files within the knowledge base. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/fbbe1117ae4c9c8fec6499d790eee275818eccc5)
+- ☁️ A provider toggle in connection settings now allows users to manually specify Azure OpenAI deployments. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/5bdd334b74fbd154085f2d590f4afdba32469c8a)
+- ⚡ Model list caching performance was optimized by fixing cache key generation to reduce redundant API calls. [#17158](https://github.com/open-webui/open-webui/pull/17158)
+- 🎨 Azure OpenAI image generation is now supported, with configurations for IMAGES_OPENAI_API_VERSION via environment variable and admin UI. [#17147](https://github.com/open-webui/open-webui/pull/17147), [#16274](https://github.com/open-webui/open-webui/discussions/16274), [Docs:#679](https://github.com/open-webui/docs/pull/679)
+- ⚡ Comprehensive N+1 query performance is optimized by reducing database queries from 1+N to 1+1 patterns across major listing endpoints. [#17165](https://github.com/open-webui/open-webui/pull/17165), [#17160](https://github.com/open-webui/open-webui/pull/17160), [#17161](https://github.com/open-webui/open-webui/pull/17161), [#17162](https://github.com/open-webui/open-webui/pull/17162), [#17159](https://github.com/open-webui/open-webui/pull/17159), [#17166](https://github.com/open-webui/open-webui/pull/17166)
+- ⚡ The PDF.js library is now dynamically loaded, significantly reducing initial page load size and improving responsiveness. [#17222](https://github.com/open-webui/open-webui/pull/17222)
+- ⚡ The heic2any library is now dynamically loaded across various message input components, including channels, for faster page loads. [#17225](https://github.com/open-webui/open-webui/pull/17225), [#17229](https://github.com/open-webui/open-webui/pull/17229)
+- 📚 The knowledge API now supports a "delete_file" query parameter, allowing configurable file deletion behavior. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/22c4ef4fb096498066b73befe993ae3a82f7a8e7)
+- 📊 Llama.cpp timing statistics are now integrated into the usage field for comprehensive model performance metrics. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/e830b4959ecd4b2795e29e53026984a58a7696a9)
+- 🗄️ The PGVECTOR_CREATE_EXTENSION environment variable now allows control over automatic pgvector extension creation. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/c2b4976c82d335ed524bd80dc914b5e2f5bfbd9e), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/b45219c8b15b48d5ee3d42983e1107bbcefbab01), [Docs:#672](https://github.com/open-webui/docs/pull/672)
+- 🔒 Comprehensive server-side OAuth token management was implemented: encrypted tokens are stored in a new database table and refreshed automatically, enabling seamless and secure forwarding of valid user-specific OAuth tokens to downstream services, including OpenAI-compatible endpoints and external tool servers via the new "system_oauth" authentication type. This resolves long-standing issues such as large token size limitations, stale or expired tokens, and unreliable token propagation, and enhances overall security by minimizing client-side token exposure; behavior is configurable via the "ENABLE_OAUTH_ID_TOKEN_COOKIE" and "OAUTH_SESSION_TOKEN_ENCRYPTION_KEY" environment variables. [Docs:#683](https://github.com/open-webui/docs/pull/683), [#17210](https://github.com/open-webui/open-webui/pull/17210), [#8957](https://github.com/open-webui/open-webui/discussions/8957), [#11029](https://github.com/open-webui/open-webui/discussions/11029), [#17178](https://github.com/open-webui/open-webui/issues/17178), [#17183](https://github.com/open-webui/open-webui/issues/17183), [Commit](https://github.com/open-webui/open-webui/commit/217f4daef09b36d3d4cc4681e11d3ebd9984a1a5), [Commit](https://github.com/open-webui/open-webui/commit/fc11e4384fe98fac659e10596f67c23483578867), [Commit](https://github.com/open-webui/open-webui/commit/f11bdc6ab5dd5682bb3e27166e77581f5b8af3e0), [Commit](https://github.com/open-webui/open-webui/commit/f71834720e623761d972d4d740e9bbd90a3a86c6), [Commit](https://github.com/open-webui/open-webui/commit/b5bb6ae177dcdc4e8274d7e5ffa50bc8099fd466), [Commit](https://github.com/open-webui/open-webui/commit/b786d1e3f3308ef4f0f95d7130ddbcaaca4fc927), [Commit](https://github.com/open-webui/open-webui/commit/8a9f8627017bd0a74cbd647891552b26e56aabb7), [Commit](https://github.com/open-webui/open-webui/commit/30d1dc2c60e303756120fe1c5538968c4e6139f4), [Commit](https://github.com/open-webui/open-webui/commit/2b2d123531eb3f42c0e940593832a64e2806240d), [Commit](https://github.com/open-webui/open-webui/commit/6f6412dd16c63c2bb4df79a96b814bf69cb3f880)
+- 🔒 Conditional Permission Hardening for OpenShift Deployments: Added a build argument to enable optional permission hardening for OpenShift and container environments. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/0ebe4f8f8490451ac8e85a4846f010854d9b54e5)
+- 👥 Regex pattern support is added for OAuth blocked groups, allowing more flexible group filtering rules. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/df66e21472646648d008ebb22b0e8d5424d491df)
+- 💬 Web search result display was enhanced to include titles and favicons, providing a clearer overview of search sources. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/33f04a771455e3fabf8f0e8ebb994ae7f41b8ed4), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/0a85dd4bca23022729eafdbc82c8c139fa365af2), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/16090bc2721fde492afa2c4af5927e2b668527e1), [#17197](https://github.com/open-webui/open-webui/pull/17197), [#14179](https://github.com/open-webui/open-webui/issues/14179), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/1cdb7aed1ee9bf81f2fd0404be52dcfa64f8ed4f), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/f2525ebc447c008cf7269ef20ce04fa456f302c4), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/7f523de408ede4075349d8de71ae0214b7e1a62e), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/3d37e4a42d344051ae715ab59bd7b5718e46c343), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/cd5e2be27b613314aadda6107089331783987985), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/6dc0df247347aede2762fe2065cf30275fd137ae)
+- 💬 A new setting was added to control whether clicking a suggested prompt automatically sends the message or only inserts the text. [#17192](https://github.com/open-webui/open-webui/issues/17192), [Commit](https://github.com/open-webui/open-webui/commit/e023a98f11fc52feb21e4065ec707cc98e50c7d3)
+- 🔄 Various improvements were implemented across the frontend and backend to enhance performance, stability, and security.
+- 🌐 Translations for Portuguese (Brazil), Simplified Chinese, Catalan, and Spanish were enhanced and expanded.
+
+### Fixed
+
+- 🔍 Hybrid search functionality now correctly handles lexical-semantic weight labels and avoids errors when BM25 weight is zero. [#17049](https://github.com/open-webui/open-webui/pull/17049), [#17046](https://github.com/open-webui/open-webui/issues/17046)
+- 🛑 Task stopping errors are prevented by gracefully handling multiple stop requests for the same task. [#17195](https://github.com/open-webui/open-webui/pull/17195)
+- 🐍 Code execution package detection precision is improved in Pyodide to prevent unnecessary package inclusions. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/bbe116795860a81a647d9567e0d9cb1950650095)
+- 🛠️ Tool message format API compliance is fixed by ensuring content fields in tool call responses contain valid string values instead of null. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/37bf0087e5b8a324009c9d06b304027df351ea6b)
+- 📱 Mobile app config API authentication now supports Authorization header token verification with cookie fallback for iOS and Android requests. [#17175](https://github.com/open-webui/open-webui/pull/17175)
+- 💾 Knowledge file save race conditions are prevented by serializing API calls and adding an "isSaving" guard. [#17137](https://github.com/open-webui/open-webui/pull/17137), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/4ca936f0bf9813bee11ec8aea41d7e34fb6b16a9)
+- 🔐 The SSO login button visibility is restored for OIDC PKCE authentication without a client secret. [#17012](https://github.com/open-webui/open-webui/pull/17012)
+- 🔊 Text-to-Speech (TTS) API requests now use proper URL joining methods, ensuring reliable functionality regardless of trailing slashes in the base URL. [#17061](https://github.com/open-webui/open-webui/pull/17061)
+- 🛡️ Admin account creation on Hugging Face Spaces now correctly detects the configured port, resolving issues with custom port deployments. [#17064](https://github.com/open-webui/open-webui/pull/17064)
+- 📁 Unicode filename support is improved for external document loaders by properly URL-encoding filenames in HTTP headers. [#17013](https://github.com/open-webui/open-webui/pull/17013), [#17000](https://github.com/open-webui/open-webui/issues/17000)
+- 🔗 Web page and YouTube attachments are now correctly processed by setting their type as "text" and using collection names for accurate content retrieval. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/487979859a6ffcfd60468f523822cdf838fbef5b)
+- ✍️ Message input composition event handling is fixed to properly manage text input for multilingual users using Input Method Editors (IME). [#17085](https://github.com/open-webui/open-webui/pull/17085)
+- 💬 Follow-up tooltip duplication is removed, streamlining the user interface and preventing visual clutter. [#17186](https://github.com/open-webui/open-webui/pull/17186)
+- 🎨 Chat button text display is corrected by preventing clipping of descending characters and removing unnecessary capitalization. [#17191](https://github.com/open-webui/open-webui/pull/17191)
+- 🧠 RAG Loop/Error with Gemma 3.1 2B Instruct is fixed by correctly unwrapping unexpected single-item list responses from models. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/1bc9711afd2b72cd07c4e539a83783868733767c), [#17213](https://github.com/open-webui/open-webui/issues/17213)
+- 🖼️ HEIC conversion failures are resolved, improving robustness of image handling. [#17225](https://github.com/open-webui/open-webui/pull/17225)
+- 📦 The slim Docker image size regression has been fixed by refining the build process to correctly exclude components when USE_SLIM=true. [#16997](https://github.com/open-webui/open-webui/issues/16997), [Commit](https://github.com/open-webui/open-webui/commit/be373e9fd42ac73b0302bdb487e16dbeae178b4e), [Commit](https://github.com/open-webui/open-webui/commit/0ebe4f8f8490451ac8e85a4846f010854d9b54e5)
+- 📁 Knowledge base update validation errors are resolved, ensuring seamless management via UI or API. [#17244](https://github.com/open-webui/open-webui/issues/17244), [Commit](https://github.com/open-webui/open-webui/commit/9aac1489080a5c9441e89b1a56de0d3a672bc5fb)
+- 🔐 Resolved a security issue where a global web search setting overrode model-specific restrictions, ensuring model-level settings are now correctly prioritized. [#17151](https://github.com/open-webui/open-webui/issues/17151), [Commit](https://github.com/open-webui/open-webui/commit/9368d0ac751ec3072d5a96712b80a9b20a642ce6)
+- 🔐 OAuth redirect reliability is improved by robustly preserving the intended redirect path using session storage. [#17235](https://github.com/open-webui/open-webui/issues/17235), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/4f2b821088367da18374027919594365c7a3f459), [#15575](https://github.com/open-webui/open-webui/pull/15575), [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/d9f97c832c556fae4b116759da0177bf4fe619de)
+- 🔐 Fixed a security vulnerability where knowledge base access within chat folders persisted after permissions were revoked. [#17182](https://github.com/open-webui/open-webui/issues/17182), [Commit](https://github.com/open-webui/open-webui/commit/40e40d1dddf9ca937e99af41c8ca038dbc93a7e6)
+- 🔒 OIDC access denied errors are now displayed as user-friendly toast notifications instead of raw JSON. [#17208](https://github.com/open-webui/open-webui/issues/17208), [Commit](https://github.com/open-webui/open-webui/commit/3d6d050ad82d360adc42d6e9f42e8faf8d13c9f4)
+- 💬 Chat exception handling is enhanced to prevent system instability during message generation and ensure graceful error recovery. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/f56889c5c7f0cf1a501c05d35dfa614e4f8b6958)
+- 🔒 Static asset authentication is improved by adding crossorigin="use-credentials" attributes to all link elements, enabling proper cookie forwarding for proxy environments and authenticated requests to favicon, manifest, and stylesheet resources. [#17280](https://github.com/open-webui/open-webui/pull/17280), [Commit](https://github.com/open-webui/open-webui/commit/f17d8b5d19e1a05df7d63f53e939c99772a59c1e)
+
+### Changed
+
+- 🛠️ Renamed "Tools" to "External Tools" across the UI for clearer distinction between built-in and external functionalities. [Commit](https://github.com/open-webui/open-webui/pull/17070/commits/0bca4e230ef276bec468889e3be036242ad11086f)
+- 🛡️ Default permission validation for message regeneration and deletion actions is enhanced to provide more restrictive access controls, improving chat security and user data protection. [#17285](https://github.com/open-webui/open-webui/pull/17285)
+
+## [0.6.26] - 2025-08-28
+
+### Added
+
+- 🛂 **Granular Chat Interaction Permissions**: Added fine-grained permission controls for individual chat actions including "Continue Response", "Regenerate Response", "Rate Response", and "Delete Messages". Administrators can now configure these permissions per user group or set system defaults via environment variables, providing enhanced security and governance by preventing potential system prompt leakage through response continuation and enabling precise control over user interactions with AI responses.
+- 🧠 **Custom Reasoning Tags Configuration**: Added configurable reasoning tag detection for AI model responses, allowing administrators and users to customize how the system identifies and processes reasoning content. Users can now define custom reasoning tag pairs, use default tags like "think" and "reasoning", or disable reasoning detection entirely through the Advanced Parameters interface, providing enhanced control over AI thought process visibility.
+- 📱 **Pull-to-Refresh Support**: Added pull-to-refresh functionality allowing users to easily refresh the interface by pulling down on the navbar area. This resolves timeout issues that occurred when temporarily switching away from the app during long AI response generations, eliminating the need to close and relaunch the PWA.
+- 📁 **Configurable File Upload Processing Mode**: Added "process_in_background" query parameter to the file upload API endpoint, allowing clients to choose between asynchronous (default) and synchronous file processing. Setting "process_in_background=false" forces the upload request to wait until extraction and embedding complete, returning immediately usable files and simplifying integration for backend API consumers that prefer blocking calls over polling workflows.
+- 🔐 **Azure Document Intelligence DefaultAzureCredential Support**: Added support for authenticating with Azure Document Intelligence using DefaultAzureCredential in addition to API key authentication, enabling seamless integration with Azure Entra ID and managed identity authentication for enterprise Azure environments.
+- 🔐 **Authentication Bootstrapping Enhancements**: Added "ENABLE_INITIAL_ADMIN_SIGNUP" environment variable and "?form=true" URL parameter to enable initial admin user creation and forced login form display in SSO-only deployments. This resolves bootstrap issues where administrators couldn't create the first user when login forms were disabled, allowing proper initialization of SSO-configured deployments without requiring temporary configuration changes.
+- ⚡ **Query Generation Caching**: Added "ENABLE_QUERIES_CACHE" environment variable to enable request-scoped caching of generated search queries. When both web search and file retrieval are active, queries generated for web search are automatically reused for file retrieval, eliminating duplicate LLM API calls and reducing token usage and costs while maintaining search quality.
+- 🔧 **Configurable Tool Call Retry Limit**: Added "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES" environment variable to control the maximum number of sequential tool calls allowed before safety stopping a chat session. This replaces the previous hardcoded limit of 10, enabling administrators to configure higher limits for complex workflows requiring extensive tool interactions.
+- 📦 **Slim Docker Image Variant**: Added new slim Docker image option via "USE_SLIM" build argument that excludes embedded AI models and Ollama installation, reducing image size by approximately 1GB. This variant enables faster image pulls and deployments for environments where AI models are managed externally, particularly beneficial for auto-scaling clusters and distributed deployments.
+- 🗂️ **Shift-to-Delete Functionality for Workspace Prompts**: Added keyboard shortcut support for quick prompt deletion on the Workspace Prompts page. Hold Shift and hover over any prompt to reveal a trash icon for instant deletion, bringing consistent interaction patterns across all workspace sections (Models, Tools, Functions, and now Prompts) and streamlining prompt management workflows.
+- ♿ **Accessibility Enhancements**: Enhanced user interface accessibility with improved keyboard navigation, ARIA labels, and screen reader compatibility across key platform components.
+- 📄 **Optimized PDF Export for Smaller File Size**: PDF exports are now significantly optimized, producing much smaller files for faster downloads and easier sharing or archiving of your chats and documents.
+- 📦 **Slimmed Default Install with Optional Full Dependencies**: Installing Open WebUI via pip now defaults to a slimmer package; PostgreSQL support is no longer included by default—simply use 'pip install open-webui[all]' to include all optional dependencies for full feature compatibility.
+- 🔄 **General Backend Refactoring**: Implemented various backend improvements to enhance performance, stability, and security, ensuring a more resilient and reliable platform for all users.
+- 🌐 **Localization & Internationalization Improvements**: Enhanced and expanded translations for Finnish, Spanish, Japanese, Polish, Portuguese (Brazil), and Chinese, including missing translations and typo corrections, providing a more natural and professional user experience for speakers of these languages across the entire interface.
+
+### Fixed
+
+- ⚠️ **Chat Error Feedback Restored**: Fixed an issue where various backend errors (tool server failures, API connection issues, malformed responses) would cause chats to load indefinitely without providing user feedback. The system now properly displays error messages when failures occur during chat generation, allowing users to understand issues and retry as needed instead of waiting indefinitely.
+- 🖼️ **Image Generation Steps Setting Visibility Fixed**: Fixed a UI issue where the "Set Steps" configuration option was incorrectly displayed for OpenAI and Gemini image generation engines that don't support this parameter. The setting is now only visible for compatible engines like ComfyUI and Automatic1111, eliminating user confusion about non-functional configuration options.
+- 📄 **Datalab Marker API Document Loader Fixed**: Fixed broken Datalab Marker API document loader functionality by correcting URL path handling for both hosted Datalab services and self-hosted Marker servers. Removed hardcoded "/marker" paths from the loader code and restored proper default URL structure, ensuring PDF and document processing works correctly with both deployment types.
+- 📋 **Citation Error Handling Improved**: Fixed an issue where malformed citation or source objects from external tools, pipes, or filters would cause JavaScript errors and make the chat interface completely unresponsive. The system now gracefully handles missing or undefined citation properties, allowing conversations to load properly even when tools generate defective citation events.
+- 👥 **Group User Add API Endpoint Fixed**: Fixed an issue where the "/api/v1/groups/id/{group_id}/users/add" API endpoint would accept requests without errors but fail to actually add users to groups. The system now properly initializes and deduplicates user ID lists, ensuring users are correctly added to and removed from groups via API calls.
+- 🛠️ **External Tool Server Error Handling Improved**: Fixed an issue where unreachable or misconfigured external tool servers would cause JavaScript errors and prevent the interface from loading properly. The system now gracefully handles connection failures, displays appropriate error messages, and filters out inaccessible servers while maintaining full functionality for working connections.
+- 📋 **Code Block Copy Button Content Fixed**: Fixed an issue where the copy button in code blocks would copy the original AI-generated code instead of any user-edited content, ensuring the copy function always captures the currently displayed code as modified by users.
+- 📄 **PDF Export Content Mismatch Fixed**: Resolved an issue where exporting a PDF of one chat while viewing another chat would incorrectly generate the PDF using the currently viewed chat's content instead of the intended chat's content. Additionally optimized the PDF generation algorithm with improved canvas slicing, better memory management, and enhanced image quality, while removing the problematic PDF export option from individual chat menus to prevent further confusion.
+- 🖱️ **Windows Sidebar Cursor Icon Corrected**: Fixed confusing cursor icons on Windows systems where sidebar toggle buttons displayed resize cursors (ew-resize) instead of appropriate pointer cursors. The sidebar buttons now show standard pointer cursors on Windows, eliminating user confusion about whether the buttons expand/collapse the sidebar or resize it.
+- 📝 **Safari IME Composition Bug Fixed**: Resolved an issue where pressing Enter while composing Chinese text using Input Method Editors (IMEs) on Safari would prematurely send messages instead of completing text composition. The system now properly detects composition states and ignores keydown events that occur immediately after composition ends, ensuring smooth multilingual text input across all browsers.
+- 🔍 **Hybrid Search Parameter Handling Fixed**: Fixed an issue where the "hybrid" parameter in collection query requests was not being properly evaluated, causing the system to ignore user-specified hybrid search preferences and only check global configuration. Additionally resolved a division by zero error that occurred in hybrid search when BM25Retriever was called with empty document lists, ensuring robust search functionality across all collection states.
+- 💬 **RTL Text Orientation in Messages Fixed**: Fixed text alignment issues in user messages and AI responses for Right-to-Left languages, ensuring proper text direction based on user language settings. Code blocks now consistently use Left-to-Right orientation regardless of the user's language preference, maintaining code readability across all supported languages.
+- 📁 **File Content Preview in Modal Restored**: Fixed an issue where clicking on uploaded files would display an empty preview modal, even when the files were successfully processed and available for AI context. File content now displays correctly in the preview modal, ensuring users can verify and review their uploaded documents as intended.
+- 🌐 **Playwright Timeout Configuration Corrected**: Fixed an issue where Playwright timeout values were incorrectly converted from milliseconds to seconds with an additional 1000x multiplier, causing excessively long web loading timeouts. The timeout parameter now correctly uses the configured millisecond values as intended, ensuring responsive web search and document loading operations.
+
+### Changed
+
+- 🔄 **Follow-Up Question Language Constraint Removed**: Follow-up question suggestions no longer strictly adhere to the chat's primary language setting, allowing for more flexible and diverse suggestion generation that may include questions in different languages based on conversation context and relevance rather than enforced language matching.
+
## [0.6.25] - 2025-08-22
### Fixed
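As an illustration of the "Configurable File Upload Processing Mode" entry in 0.6.26 above: a minimal sketch of a blocking upload. The `/api/v1/files/` path, base URL, and token are assumptions for demonstration; adjust them to your deployment.

```python
# Minimal sketch, assuming the standard file upload endpoint and a bearer token;
# process_in_background=false makes the request wait until extraction and
# embedding complete instead of returning immediately.
import requests

BASE_URL = "http://localhost:8080"  # placeholder deployment URL
TOKEN = "YOUR_API_TOKEN"            # placeholder token

with open("report.pdf", "rb") as f:
    resp = requests.post(
        f"{BASE_URL}/api/v1/files/",
        params={"process_in_background": "false"},  # block until processing completes
        headers={"Authorization": f"Bearer {TOKEN}"},
        files={"file": f},
        timeout=300,
    )
resp.raise_for_status()
print(resp.json())
```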
diff --git a/Dockerfile b/Dockerfile
index 83a74365f0..ad393338d8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,6 +3,8 @@
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
+ARG USE_SLIM=false
+ARG USE_PERMISSION_HARDENING=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
ARG USE_CUDA_VER=cu128
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
@@ -24,6 +26,9 @@ ARG GID=0
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
ARG BUILD_HASH
+# Optionally raise the Node.js heap limit if the frontend build fails with "Allocation failed - JavaScript heap out of memory"
+# ENV NODE_OPTIONS="--max-old-space-size=4096"
+
WORKDIR /app
# to store git revision in build
@@ -43,6 +48,8 @@ FROM python:3.11-slim-bookworm AS base
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
+ARG USE_SLIM
+ARG USE_PERMISSION_HARDENING
ARG USE_EMBEDDING_MODEL
ARG USE_RERANKING_MODEL
ARG UID
@@ -54,6 +61,7 @@ ENV ENV=prod \
# pass build args to the build
USE_OLLAMA_DOCKER=${USE_OLLAMA} \
USE_CUDA_DOCKER=${USE_CUDA} \
+ USE_SLIM_DOCKER=${USE_SLIM} \
USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL}
@@ -130,11 +138,14 @@ RUN pip3 install --no-cache-dir uv && \
else \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
uv pip install --system -r requirements.txt --no-cache-dir && \
+ if [ "$USE_SLIM" != "true" ]; then \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
fi; \
- chown -R $UID:$GID /app/backend/data/
+ fi; \
+ mkdir -p /app/backend/data && chown -R $UID:$GID /app/backend/data/ && \
+ rm -rf /var/lib/apt/lists/*;
# Install Ollama if requested
RUN if [ "$USE_OLLAMA" = "true" ]; then \
@@ -163,11 +174,13 @@ HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq
# Minimal, atomic permission hardening for OpenShift (arbitrary UID):
# - Group 0 owns /app and /root
# - Directories are group-writable and have SGID so new files inherit GID 0
-RUN set -eux; \
+RUN if [ "$USE_PERMISSION_HARDENING" = "true" ]; then \
+ set -eux; \
chgrp -R 0 /app /root || true; \
chmod -R g+rwX /app /root || true; \
find /app -type d -exec chmod g+s {} + || true; \
- find /root -type d -exec chmod g+s {} + || true
+ find /root -type d -exec chmod g+s {} + || true; \
+ fi
USER $UID:$GID
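For reference, a hedged sketch of building the new slim variant locally with the build args introduced in the Dockerfile above; the image tag is a placeholder and the hardening flag is optional.

```python
# Illustrative sketch only: drive a local build of the slim variant using the
# USE_SLIM / USE_PERMISSION_HARDENING build args added above.
import subprocess

subprocess.run(
    [
        "docker", "build",
        "--build-arg", "USE_SLIM=true",                  # skip embedded models and Ollama
        "--build-arg", "USE_PERMISSION_HARDENING=true",  # opt-in OpenShift-style hardening
        "-t", "open-webui:slim",                         # placeholder tag
        ".",
    ],
    check=True,
)
```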
diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py
index d3b7c9314c..11698d87af 100644
--- a/backend/open_webui/config.py
+++ b/backend/open_webui/config.py
@@ -313,7 +313,7 @@ JWT_EXPIRES_IN = PersistentConfig(
####################################
ENABLE_OAUTH_PERSISTENT_CONFIG = (
- os.environ.get("ENABLE_OAUTH_PERSISTENT_CONFIG", "True").lower() == "true"
+ os.environ.get("ENABLE_OAUTH_PERSISTENT_CONFIG", "False").lower() == "true"
)
ENABLE_OAUTH_SIGNUP = PersistentConfig(
@@ -660,7 +660,7 @@ def load_oauth_providers():
if (
OAUTH_CLIENT_ID.value
- and OAUTH_CLIENT_SECRET.value
+ and (OAUTH_CLIENT_SECRET.value or OAUTH_CODE_CHALLENGE_METHOD.value)
and OPENID_PROVIDER_URL.value
):
@@ -1208,6 +1208,23 @@ USER_PERMISSIONS_CHAT_DELETE = (
os.environ.get("USER_PERMISSIONS_CHAT_DELETE", "True").lower() == "true"
)
+USER_PERMISSIONS_CHAT_DELETE_MESSAGE = (
+ os.environ.get("USER_PERMISSIONS_CHAT_DELETE_MESSAGE", "True").lower() == "true"
+)
+
+USER_PERMISSIONS_CHAT_CONTINUE_RESPONSE = (
+ os.environ.get("USER_PERMISSIONS_CHAT_CONTINUE_RESPONSE", "True").lower() == "true"
+)
+
+USER_PERMISSIONS_CHAT_REGENERATE_RESPONSE = (
+ os.environ.get("USER_PERMISSIONS_CHAT_REGENERATE_RESPONSE", "True").lower()
+ == "true"
+)
+
+USER_PERMISSIONS_CHAT_RATE_RESPONSE = (
+ os.environ.get("USER_PERMISSIONS_CHAT_RATE_RESPONSE", "True").lower() == "true"
+)
+
USER_PERMISSIONS_CHAT_EDIT = (
os.environ.get("USER_PERMISSIONS_CHAT_EDIT", "True").lower() == "true"
)
@@ -1290,6 +1307,10 @@ DEFAULT_USER_PERMISSIONS = {
"params": USER_PERMISSIONS_CHAT_PARAMS,
"file_upload": USER_PERMISSIONS_CHAT_FILE_UPLOAD,
"delete": USER_PERMISSIONS_CHAT_DELETE,
+ "delete_message": USER_PERMISSIONS_CHAT_DELETE_MESSAGE,
+ "continue_response": USER_PERMISSIONS_CHAT_CONTINUE_RESPONSE,
+ "regenerate_response": USER_PERMISSIONS_CHAT_REGENERATE_RESPONSE,
+ "rate_response": USER_PERMISSIONS_CHAT_RATE_RESPONSE,
"edit": USER_PERMISSIONS_CHAT_EDIT,
"share": USER_PERMISSIONS_CHAT_SHARE,
"export": USER_PERMISSIONS_CHAT_EXPORT,
@@ -1576,7 +1597,7 @@ FOLLOW_UP_GENERATION_PROMPT_TEMPLATE = PersistentConfig(
)
DEFAULT_FOLLOW_UP_GENERATION_PROMPT_TEMPLATE = """### Task:
-Suggest 3-5 relevant follow-up questions or prompts in the chat's primary language that the user might naturally ask next in this conversation as a **user**, based on the chat history, to help continue or deepen the discussion.
+Suggest 3-5 relevant follow-up questions or prompts that the user might naturally ask next in this conversation as a **user**, based on the chat history, to help continue or deepen the discussion.
### Guidelines:
- Write all follow-up questions from the user’s point of view, directed to the assistant.
- Make questions concise, clear, and directly related to the discussed topic(s).
@@ -1977,6 +1998,9 @@ PGVECTOR_INITIALIZE_MAX_VECTOR_LENGTH = int(
os.environ.get("PGVECTOR_INITIALIZE_MAX_VECTOR_LENGTH", "1536")
)
+PGVECTOR_CREATE_EXTENSION = (
+ os.getenv("PGVECTOR_CREATE_EXTENSION", "true").lower() == "true"
+)
PGVECTOR_PGCRYPTO = os.getenv("PGVECTOR_PGCRYPTO", "false").lower() == "true"
PGVECTOR_PGCRYPTO_KEY = os.getenv("PGVECTOR_PGCRYPTO_KEY", None)
if PGVECTOR_PGCRYPTO and not PGVECTOR_PGCRYPTO_KEY:
@@ -2208,6 +2232,18 @@ DOCLING_SERVER_URL = PersistentConfig(
os.getenv("DOCLING_SERVER_URL", "http://docling:5001"),
)
+DOCLING_DO_OCR = PersistentConfig(
+ "DOCLING_DO_OCR",
+ "rag.docling_do_ocr",
+ os.getenv("DOCLING_DO_OCR", "True").lower() == "true",
+)
+
+DOCLING_FORCE_OCR = PersistentConfig(
+ "DOCLING_FORCE_OCR",
+ "rag.docling_force_ocr",
+ os.getenv("DOCLING_FORCE_OCR", "False").lower() == "true",
+)
+
DOCLING_OCR_ENGINE = PersistentConfig(
"DOCLING_OCR_ENGINE",
"rag.docling_ocr_engine",
@@ -2220,6 +2256,24 @@ DOCLING_OCR_LANG = PersistentConfig(
os.getenv("DOCLING_OCR_LANG", "eng,fra,deu,spa"),
)
+DOCLING_PDF_BACKEND = PersistentConfig(
+ "DOCLING_PDF_BACKEND",
+ "rag.docling_pdf_backend",
+ os.getenv("DOCLING_PDF_BACKEND", "dlparse_v4"),
+)
+
+DOCLING_TABLE_MODE = PersistentConfig(
+ "DOCLING_TABLE_MODE",
+ "rag.docling_table_mode",
+ os.getenv("DOCLING_TABLE_MODE", "accurate"),
+)
+
+DOCLING_PIPELINE = PersistentConfig(
+ "DOCLING_PIPELINE",
+ "rag.docling_pipeline",
+ os.getenv("DOCLING_PIPELINE", "standard"),
+)
+
DOCLING_DO_PICTURE_DESCRIPTION = PersistentConfig(
"DOCLING_DO_PICTURE_DESCRIPTION",
"rag.docling_do_picture_description",
@@ -3076,6 +3130,12 @@ IMAGES_OPENAI_API_BASE_URL = PersistentConfig(
"image_generation.openai.api_base_url",
os.getenv("IMAGES_OPENAI_API_BASE_URL", OPENAI_API_BASE_URL),
)
+IMAGES_OPENAI_API_VERSION = PersistentConfig(
+ "IMAGES_OPENAI_API_VERSION",
+ "image_generation.openai.api_version",
+ os.getenv("IMAGES_OPENAI_API_VERSION", ""),
+)
+
IMAGES_OPENAI_API_KEY = PersistentConfig(
"IMAGES_OPENAI_API_KEY",
"image_generation.openai.api_key",
diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py
index 83625031ea..b4fdc97d82 100644
--- a/backend/open_webui/env.py
+++ b/backend/open_webui/env.py
@@ -362,6 +362,8 @@ ENABLE_REALTIME_CHAT_SAVE = (
os.environ.get("ENABLE_REALTIME_CHAT_SAVE", "False").lower() == "true"
)
+ENABLE_QUERIES_CACHE = os.environ.get("ENABLE_QUERIES_CACHE", "False").lower() == "true"
+
####################################
# REDIS
####################################
@@ -402,6 +404,10 @@ except ValueError:
####################################
WEBUI_AUTH = os.environ.get("WEBUI_AUTH", "True").lower() == "true"
+
+ENABLE_INITIAL_ADMIN_SIGNUP = (
+ os.environ.get("ENABLE_INITIAL_ADMIN_SIGNUP", "False").lower() == "true"
+)
ENABLE_SIGNUP_PASSWORD_CONFIRMATION = (
os.environ.get("ENABLE_SIGNUP_PASSWORD_CONFIRMATION", "False").lower() == "true"
)
@@ -459,6 +465,19 @@ ENABLE_COMPRESSION_MIDDLEWARE = (
os.environ.get("ENABLE_COMPRESSION_MIDDLEWARE", "True").lower() == "true"
)
+####################################
+# OAUTH Configuration
+####################################
+
+
+ENABLE_OAUTH_ID_TOKEN_COOKIE = (
+ os.environ.get("ENABLE_OAUTH_ID_TOKEN_COOKIE", "True").lower() == "true"
+)
+
+OAUTH_SESSION_TOKEN_ENCRYPTION_KEY = os.environ.get(
+ "OAUTH_SESSION_TOKEN_ENCRYPTION_KEY", WEBUI_SECRET_KEY
+)
+
####################################
# SCIM Configuration
@@ -527,6 +546,19 @@ else:
CHAT_RESPONSE_STREAM_DELTA_CHUNK_SIZE = 1
+CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = os.environ.get(
+ "CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES", "10"
+)
+
+if CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES == "":
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10
+else:
+ try:
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = int(CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES)
+ except Exception:
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES = 10
+
+
####################################
# WEBSOCKET SUPPORT
####################################
diff --git a/backend/open_webui/functions.py b/backend/open_webui/functions.py
index d8f2a61257..4122cbbe0d 100644
--- a/backend/open_webui/functions.py
+++ b/backend/open_webui/functions.py
@@ -219,6 +219,15 @@ async def generate_function_chat_completion(
__task__ = metadata.get("task", None)
__task_body__ = metadata.get("task_body", None)
+ oauth_token = None
+ try:
+ oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
+ except Exception as e:
+ log.error(f"Error getting OAuth token: {e}")
+
extra_params = {
"__event_emitter__": __event_emitter__,
"__event_call__": __event_call__,
@@ -230,9 +239,10 @@ async def generate_function_chat_completion(
"__files__": files,
"__user__": user.model_dump() if isinstance(user, UserModel) else {},
"__metadata__": metadata,
+ "__oauth_token__": oauth_token,
"__request__": request,
}
- extra_params["__tools__"] = get_tools(
+ extra_params["__tools__"] = await get_tools(
request,
tool_ids,
user,
diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py
index 8ec14e10e8..31a6f0c054 100644
--- a/backend/open_webui/main.py
+++ b/backend/open_webui/main.py
@@ -158,6 +158,7 @@ from open_webui.config import (
IMAGE_SIZE,
IMAGE_STEPS,
IMAGES_OPENAI_API_BASE_URL,
+ IMAGES_OPENAI_API_VERSION,
IMAGES_OPENAI_API_KEY,
IMAGES_GEMINI_API_BASE_URL,
IMAGES_GEMINI_API_KEY,
@@ -244,8 +245,13 @@ from open_webui.config import (
EXTERNAL_DOCUMENT_LOADER_API_KEY,
TIKA_SERVER_URL,
DOCLING_SERVER_URL,
+ DOCLING_DO_OCR,
+ DOCLING_FORCE_OCR,
DOCLING_OCR_ENGINE,
DOCLING_OCR_LANG,
+ DOCLING_PDF_BACKEND,
+ DOCLING_TABLE_MODE,
+ DOCLING_PIPELINE,
DOCLING_DO_PICTURE_DESCRIPTION,
DOCLING_PICTURE_DESCRIPTION_MODE,
DOCLING_PICTURE_DESCRIPTION_LOCAL,
@@ -592,6 +598,7 @@ app = FastAPI(
)
oauth_manager = OAuthManager(app)
+app.state.oauth_manager = oauth_manager
app.state.instance_id = None
app.state.config = AppConfig(
@@ -811,8 +818,13 @@ app.state.config.EXTERNAL_DOCUMENT_LOADER_URL = EXTERNAL_DOCUMENT_LOADER_URL
app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY = EXTERNAL_DOCUMENT_LOADER_API_KEY
app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL
app.state.config.DOCLING_SERVER_URL = DOCLING_SERVER_URL
+app.state.config.DOCLING_DO_OCR = DOCLING_DO_OCR
+app.state.config.DOCLING_FORCE_OCR = DOCLING_FORCE_OCR
app.state.config.DOCLING_OCR_ENGINE = DOCLING_OCR_ENGINE
app.state.config.DOCLING_OCR_LANG = DOCLING_OCR_LANG
+app.state.config.DOCLING_PDF_BACKEND = DOCLING_PDF_BACKEND
+app.state.config.DOCLING_TABLE_MODE = DOCLING_TABLE_MODE
+app.state.config.DOCLING_PIPELINE = DOCLING_PIPELINE
app.state.config.DOCLING_DO_PICTURE_DESCRIPTION = DOCLING_DO_PICTURE_DESCRIPTION
app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE = DOCLING_PICTURE_DESCRIPTION_MODE
app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL = DOCLING_PICTURE_DESCRIPTION_LOCAL
@@ -1020,6 +1032,7 @@ app.state.config.ENABLE_IMAGE_GENERATION = ENABLE_IMAGE_GENERATION
app.state.config.ENABLE_IMAGE_PROMPT_GENERATION = ENABLE_IMAGE_PROMPT_GENERATION
app.state.config.IMAGES_OPENAI_API_BASE_URL = IMAGES_OPENAI_API_BASE_URL
+app.state.config.IMAGES_OPENAI_API_VERSION = IMAGES_OPENAI_API_VERSION
app.state.config.IMAGES_OPENAI_API_KEY = IMAGES_OPENAI_API_KEY
app.state.config.IMAGES_GEMINI_API_BASE_URL = IMAGES_GEMINI_API_BASE_URL
@@ -1407,6 +1420,14 @@ async def chat_completion(
model_item = form_data.pop("model_item", {})
tasks = form_data.pop("background_tasks", None)
+ oauth_token = None
+ try:
+ oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ user.id, request.cookies.get("oauth_session_id", None)
+ )
+ except Exception as e:
+ log.error(f"Error getting OAuth token: {e}")
+
metadata = {}
try:
if not model_item.get("direct", False):
@@ -1439,11 +1460,15 @@ async def chat_completion(
stream_delta_chunk_size = form_data.get("params", {}).get(
"stream_delta_chunk_size"
)
+ reasoning_tags = form_data.get("params", {}).get("reasoning_tags")
# Model Params
if model_info_params.get("stream_delta_chunk_size"):
stream_delta_chunk_size = model_info_params.get("stream_delta_chunk_size")
+ if model_info_params.get("reasoning_tags") is not None:
+ reasoning_tags = model_info_params.get("reasoning_tags")
+
metadata = {
"user_id": user.id,
"chat_id": form_data.pop("chat_id", None),
@@ -1459,6 +1484,7 @@ async def chat_completion(
"direct": model_item.get("direct", False),
"params": {
"stream_delta_chunk_size": stream_delta_chunk_size,
+ "reasoning_tags": reasoning_tags,
"function_calling": (
"native"
if (
@@ -1516,7 +1542,7 @@ async def chat_completion(
try:
event_emitter = get_event_emitter(metadata)
await event_emitter(
- {"type": "task-cancelled"},
+ {"type": "chat:tasks:cancel"},
)
except Exception as e:
pass
@@ -1532,14 +1558,21 @@ async def chat_completion(
"error": {"content": str(e)},
},
)
+
+ event_emitter = get_event_emitter(metadata)
+ await event_emitter(
+ {
+ "type": "chat:message:error",
+ "data": {"error": {"content": str(e)}},
+ }
+ )
+ await event_emitter(
+ {"type": "chat:tasks:cancel"},
+ )
+
except:
pass
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=str(e),
- )
-
if (
metadata.get("session_id")
and metadata.get("chat_id")
@@ -1639,8 +1672,18 @@ async def list_tasks_by_chat_id_endpoint(
@app.get("/api/config")
async def get_app_config(request: Request):
user = None
- if "token" in request.cookies:
+ token = None
+
+ auth_header = request.headers.get("Authorization")
+ if auth_header:
+ cred = get_http_authorization_cred(auth_header)
+ if cred:
+ token = cred.credentials
+
+ if not token and "token" in request.cookies:
token = request.cookies.get("token")
+
+ if token:
try:
data = decode_token(token)
except Exception as e:
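A minimal sketch of the client flow the `get_app_config` change above enables: mobile or API clients can authenticate `/api/config` with an `Authorization` header instead of relying on the `token` cookie. The base URL and token are placeholders.

```python
# Minimal sketch: /api/config now honors a bearer token in the Authorization
# header, falling back to the "token" cookie when no header is present.
import requests

BASE_URL = "http://localhost:8080"  # placeholder deployment URL
TOKEN = "YOUR_API_TOKEN"            # placeholder token

resp = requests.get(
    f"{BASE_URL}/api/config",
    headers={"Authorization": f"Bearer {TOKEN}"},  # checked before the cookie
    timeout=10,
)
resp.raise_for_status()
print(resp.json())
```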
diff --git a/backend/open_webui/migrations/versions/38d63c18f30f_add_oauth_session_table.py b/backend/open_webui/migrations/versions/38d63c18f30f_add_oauth_session_table.py
new file mode 100644
index 0000000000..8ead6db6d4
--- /dev/null
+++ b/backend/open_webui/migrations/versions/38d63c18f30f_add_oauth_session_table.py
@@ -0,0 +1,52 @@
+"""Add oauth_session table
+
+Revision ID: 38d63c18f30f
+Revises: 3af16a1c9fb6
+Create Date: 2025-09-08 14:19:59.583921
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "38d63c18f30f"
+down_revision: Union[str, None] = "3af16a1c9fb6"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # Create oauth_session table
+ op.create_table(
+ "oauth_session",
+ sa.Column("id", sa.Text(), nullable=False),
+ sa.Column("user_id", sa.Text(), nullable=False),
+ sa.Column("provider", sa.Text(), nullable=False),
+ sa.Column("token", sa.Text(), nullable=False),
+ sa.Column("expires_at", sa.BigInteger(), nullable=False),
+ sa.Column("created_at", sa.BigInteger(), nullable=False),
+ sa.Column("updated_at", sa.BigInteger(), nullable=False),
+ sa.PrimaryKeyConstraint("id"),
+ sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="CASCADE"),
+ )
+
+ # Create indexes for better performance
+ op.create_index("idx_oauth_session_user_id", "oauth_session", ["user_id"])
+ op.create_index("idx_oauth_session_expires_at", "oauth_session", ["expires_at"])
+ op.create_index(
+ "idx_oauth_session_user_provider", "oauth_session", ["user_id", "provider"]
+ )
+
+
+def downgrade() -> None:
+ # Drop indexes first
+ op.drop_index("idx_oauth_session_user_provider", table_name="oauth_session")
+ op.drop_index("idx_oauth_session_expires_at", table_name="oauth_session")
+ op.drop_index("idx_oauth_session_user_id", table_name="oauth_session")
+
+ # Drop the table
+ op.drop_table("oauth_session")
diff --git a/backend/open_webui/models/files.py b/backend/open_webui/models/files.py
index 6f1511cd13..57978225d4 100644
--- a/backend/open_webui/models/files.py
+++ b/backend/open_webui/models/files.py
@@ -147,6 +147,15 @@ class FilesTable:
with get_db() as db:
return [FileModel.model_validate(file) for file in db.query(File).all()]
+ def check_access_by_user_id(self, id, user_id, permission="write") -> bool:
+ file = self.get_file_by_id(id)
+ if not file:
+ return False
+ if file.user_id == user_id:
+ return True
+ # Implement additional access control logic here as needed
+ return False
+
def get_files_by_ids(self, ids: list[str]) -> list[FileModel]:
with get_db() as db:
return [
diff --git a/backend/open_webui/models/folders.py b/backend/open_webui/models/folders.py
index b597074e81..6fdabff431 100644
--- a/backend/open_webui/models/folders.py
+++ b/backend/open_webui/models/folders.py
@@ -58,6 +58,14 @@ class FolderModel(BaseModel):
class FolderForm(BaseModel):
name: str
data: Optional[dict] = None
+ meta: Optional[dict] = None
+ model_config = ConfigDict(extra="allow")
+
+
+class FolderUpdateForm(BaseModel):
+ name: Optional[str] = None
+ data: Optional[dict] = None
+ meta: Optional[dict] = None
model_config = ConfigDict(extra="allow")
@@ -197,7 +205,7 @@ class FolderTable:
return
def update_folder_by_id_and_user_id(
- self, id: str, user_id: str, form_data: FolderForm
+ self, id: str, user_id: str, form_data: FolderUpdateForm
) -> Optional[FolderModel]:
try:
with get_db() as db:
@@ -228,8 +236,13 @@ class FolderTable:
**form_data["data"],
}
- folder.updated_at = int(time.time())
+ if "meta" in form_data:
+ folder.meta = {
+ **(folder.meta or {}),
+ **form_data["meta"],
+ }
+ folder.updated_at = int(time.time())
db.commit()
return FolderModel.model_validate(folder)
diff --git a/backend/open_webui/models/functions.py b/backend/open_webui/models/functions.py
index 7530573e79..2bb6d60889 100644
--- a/backend/open_webui/models/functions.py
+++ b/backend/open_webui/models/functions.py
@@ -54,6 +54,22 @@ class FunctionModel(BaseModel):
model_config = ConfigDict(from_attributes=True)
+class FunctionWithValvesModel(BaseModel):
+ id: str
+ user_id: str
+ name: str
+ type: str
+ content: str
+ meta: FunctionMeta
+ valves: Optional[dict] = None
+ is_active: bool = False
+ is_global: bool = False
+ updated_at: int # timestamp in epoch
+ created_at: int # timestamp in epoch
+
+ model_config = ConfigDict(from_attributes=True)
+
+
####################
# Forms
####################
@@ -111,8 +127,8 @@ class FunctionsTable:
return None
def sync_functions(
- self, user_id: str, functions: list[FunctionModel]
- ) -> list[FunctionModel]:
+ self, user_id: str, functions: list[FunctionWithValvesModel]
+ ) -> list[FunctionWithValvesModel]:
# Synchronize functions for a user by updating existing ones, inserting new ones, and removing those that are no longer present.
try:
with get_db() as db:
@@ -166,17 +182,24 @@ class FunctionsTable:
except Exception:
return None
- def get_functions(self, active_only=False) -> list[FunctionModel]:
+ def get_functions(
+ self, active_only=False, include_valves=False
+ ) -> list[FunctionModel | FunctionWithValvesModel]:
with get_db() as db:
if active_only:
+ functions = db.query(Function).filter_by(is_active=True).all()
+
+ else:
+ functions = db.query(Function).all()
+
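+        # Optionally include each function's stored valve settings (used by the admin export and sync endpoints)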
+ if include_valves:
return [
- FunctionModel.model_validate(function)
- for function in db.query(Function).filter_by(is_active=True).all()
+ FunctionWithValvesModel.model_validate(function)
+ for function in functions
]
else:
return [
- FunctionModel.model_validate(function)
- for function in db.query(Function).all()
+ FunctionModel.model_validate(function) for function in functions
]
def get_functions_by_type(
diff --git a/backend/open_webui/models/groups.py b/backend/open_webui/models/groups.py
index 6615f95142..a09b2b73f9 100644
--- a/backend/open_webui/models/groups.py
+++ b/backend/open_webui/models/groups.py
@@ -288,13 +288,17 @@ class GroupTable:
if not group:
return None
- if not group.user_ids:
- group.user_ids = []
+ group_user_ids = group.user_ids
+ if not group_user_ids or not isinstance(group_user_ids, list):
+ group_user_ids = []
+
+ group_user_ids = list(set(group_user_ids)) # Deduplicate
for user_id in user_ids:
- if user_id not in group.user_ids:
- group.user_ids.append(user_id)
+ if user_id not in group_user_ids:
+ group_user_ids.append(user_id)
+ group.user_ids = group_user_ids
group.updated_at = int(time.time())
db.commit()
db.refresh(group)
@@ -312,14 +316,20 @@ class GroupTable:
if not group:
return None
- if not group.user_ids:
+ group_user_ids = group.user_ids
+
+ if not group_user_ids or not isinstance(group_user_ids, list):
return GroupModel.model_validate(group)
- for user_id in user_ids:
- if user_id in group.user_ids:
- group.user_ids.remove(user_id)
+ group_user_ids = list(set(group_user_ids)) # Deduplicate
+ for user_id in user_ids:
+ if user_id in group_user_ids:
+ group_user_ids.remove(user_id)
+
+ group.user_ids = group_user_ids
group.updated_at = int(time.time())
+
db.commit()
db.refresh(group)
return GroupModel.model_validate(group)
diff --git a/backend/open_webui/models/knowledge.py b/backend/open_webui/models/knowledge.py
index bed3d5542e..cfef77e237 100644
--- a/backend/open_webui/models/knowledge.py
+++ b/backend/open_webui/models/knowledge.py
@@ -8,6 +8,7 @@ from open_webui.internal.db import Base, get_db
from open_webui.env import SRC_LOG_LEVELS
from open_webui.models.files import FileMetadataResponse
+from open_webui.models.groups import Groups
from open_webui.models.users import Users, UserResponse
@@ -128,11 +129,18 @@ class KnowledgeTable:
def get_knowledge_bases(self) -> list[KnowledgeUserModel]:
with get_db() as db:
- knowledge_bases = []
- for knowledge in (
+ all_knowledge = (
db.query(Knowledge).order_by(Knowledge.updated_at.desc()).all()
- ):
- user = Users.get_user_by_id(knowledge.user_id)
+ )
+
+ user_ids = list(set(knowledge.user_id for knowledge in all_knowledge))
+
+ users = Users.get_users_by_user_ids(user_ids) if user_ids else []
+ users_dict = {user.id: user for user in users}
+
+ knowledge_bases = []
+ for knowledge in all_knowledge:
+ user = users_dict.get(knowledge.user_id)
knowledge_bases.append(
KnowledgeUserModel.model_validate(
{
@@ -143,15 +151,27 @@ class KnowledgeTable:
)
return knowledge_bases
+ def check_access_by_user_id(self, id, user_id, permission="write") -> bool:
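+        # The owner always has access; otherwise evaluate the ACL against the user's group memberships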
+ knowledge = self.get_knowledge_by_id(id)
+ if not knowledge:
+ return False
+ if knowledge.user_id == user_id:
+ return True
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
+ return has_access(user_id, permission, knowledge.access_control, user_group_ids)
+
def get_knowledge_bases_by_user_id(
self, user_id: str, permission: str = "write"
) -> list[KnowledgeUserModel]:
knowledge_bases = self.get_knowledge_bases()
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
return [
knowledge_base
for knowledge_base in knowledge_bases
if knowledge_base.user_id == user_id
- or has_access(user_id, permission, knowledge_base.access_control)
+ or has_access(
+ user_id, permission, knowledge_base.access_control, user_group_ids
+ )
]
def get_knowledge_by_id(self, id: str) -> Optional[KnowledgeModel]:
diff --git a/backend/open_webui/models/models.py b/backend/open_webui/models/models.py
index 1a29b86eae..93dafe0f05 100755
--- a/backend/open_webui/models/models.py
+++ b/backend/open_webui/models/models.py
@@ -5,6 +5,7 @@ from typing import Optional
from open_webui.internal.db import Base, JSONField, get_db
from open_webui.env import SRC_LOG_LEVELS
+from open_webui.models.groups import Groups
from open_webui.models.users import Users, UserResponse
@@ -175,9 +176,16 @@ class ModelsTable:
def get_models(self) -> list[ModelUserResponse]:
with get_db() as db:
+ all_models = db.query(Model).filter(Model.base_model_id != None).all()
+
+ user_ids = list(set(model.user_id for model in all_models))
+
+ users = Users.get_users_by_user_ids(user_ids) if user_ids else []
+ users_dict = {user.id: user for user in users}
+
models = []
- for model in db.query(Model).filter(Model.base_model_id != None).all():
- user = Users.get_user_by_id(model.user_id)
+ for model in all_models:
+ user = users_dict.get(model.user_id)
models.append(
ModelUserResponse.model_validate(
{
@@ -199,11 +207,12 @@ class ModelsTable:
self, user_id: str, permission: str = "write"
) -> list[ModelUserResponse]:
models = self.get_models()
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
return [
model
for model in models
if model.user_id == user_id
- or has_access(user_id, permission, model.access_control)
+ or has_access(user_id, permission, model.access_control, user_group_ids)
]
def get_model_by_id(self, id: str) -> Optional[ModelModel]:
diff --git a/backend/open_webui/models/notes.py b/backend/open_webui/models/notes.py
index ce3b9f2e20..c720ff80a4 100644
--- a/backend/open_webui/models/notes.py
+++ b/backend/open_webui/models/notes.py
@@ -4,6 +4,7 @@ import uuid
from typing import Optional
from open_webui.internal.db import Base, get_db
+from open_webui.models.groups import Groups
from open_webui.utils.access_control import has_access
from open_webui.models.users import Users, UserResponse
@@ -105,11 +106,12 @@ class NoteTable:
self, user_id: str, permission: str = "write"
) -> list[NoteModel]:
notes = self.get_notes()
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
return [
note
for note in notes
if note.user_id == user_id
- or has_access(user_id, permission, note.access_control)
+ or has_access(user_id, permission, note.access_control, user_group_ids)
]
def get_note_by_id(self, id: str) -> Optional[NoteModel]:
diff --git a/backend/open_webui/models/oauth_sessions.py b/backend/open_webui/models/oauth_sessions.py
new file mode 100644
index 0000000000..9fd5335ce5
--- /dev/null
+++ b/backend/open_webui/models/oauth_sessions.py
@@ -0,0 +1,246 @@
+import time
+import logging
+import uuid
+from typing import Optional, List
+import base64
+import hashlib
+import json
+
+from cryptography.fernet import Fernet
+
+from open_webui.internal.db import Base, get_db
+from open_webui.env import SRC_LOG_LEVELS, OAUTH_SESSION_TOKEN_ENCRYPTION_KEY
+
+from pydantic import BaseModel, ConfigDict
+from sqlalchemy import BigInteger, Column, String, Text, Index
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
+####################
+# DB MODEL
+####################
+
+
+class OAuthSession(Base):
+ __tablename__ = "oauth_session"
+
+ id = Column(Text, primary_key=True)
+ user_id = Column(Text, nullable=False)
+ provider = Column(Text, nullable=False)
+ token = Column(
+ Text, nullable=False
+ ) # JSON with access_token, id_token, refresh_token
+ expires_at = Column(BigInteger, nullable=False)
+ created_at = Column(BigInteger, nullable=False)
+ updated_at = Column(BigInteger, nullable=False)
+
+ # Add indexes for better performance
+ __table_args__ = (
+ Index("idx_oauth_session_user_id", "user_id"),
+ Index("idx_oauth_session_expires_at", "expires_at"),
+ Index("idx_oauth_session_user_provider", "user_id", "provider"),
+ )
+
+
+class OAuthSessionModel(BaseModel):
+ id: str
+ user_id: str
+ provider: str
+ token: dict
+ expires_at: int # timestamp in epoch
+ created_at: int # timestamp in epoch
+ updated_at: int # timestamp in epoch
+
+ model_config = ConfigDict(from_attributes=True)
+
+
+####################
+# Forms
+####################
+
+
+class OAuthSessionResponse(BaseModel):
+ id: str
+ user_id: str
+ provider: str
+ expires_at: int
+
+
+class OAuthSessionTable:
+ def __init__(self):
+ self.encryption_key = OAUTH_SESSION_TOKEN_ENCRYPTION_KEY
+ if not self.encryption_key:
+ raise Exception("OAUTH_SESSION_TOKEN_ENCRYPTION_KEY is not set")
+
+ # check if encryption key is in the right format for Fernet (32 url-safe base64-encoded bytes)
+ if len(self.encryption_key) != 44:
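+            # Derive a valid Fernet key from an arbitrary secret: SHA-256 digest, url-safe base64-encoded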
+ key_bytes = hashlib.sha256(self.encryption_key.encode()).digest()
+ self.encryption_key = base64.urlsafe_b64encode(key_bytes)
+ else:
+ self.encryption_key = self.encryption_key.encode()
+
+ try:
+ self.fernet = Fernet(self.encryption_key)
+ except Exception as e:
+ log.error(f"Error initializing Fernet with provided key: {e}")
+ raise
+
+ def _encrypt_token(self, token) -> str:
+ """Encrypt OAuth tokens for storage"""
+ try:
+ token_json = json.dumps(token)
+ encrypted = self.fernet.encrypt(token_json.encode()).decode()
+ return encrypted
+ except Exception as e:
+ log.error(f"Error encrypting tokens: {e}")
+ raise
+
+ def _decrypt_token(self, token: str):
+ """Decrypt OAuth tokens from storage"""
+ try:
+ decrypted = self.fernet.decrypt(token.encode()).decode()
+ return json.loads(decrypted)
+ except Exception as e:
+ log.error(f"Error decrypting tokens: {e}")
+ raise
+
+ def create_session(
+ self,
+ user_id: str,
+ provider: str,
+ token: dict,
+ ) -> Optional[OAuthSessionModel]:
+ """Create a new OAuth session"""
+ try:
+ with get_db() as db:
+ current_time = int(time.time())
+ id = str(uuid.uuid4())
+
+ result = OAuthSession(
+ **{
+ "id": id,
+ "user_id": user_id,
+ "provider": provider,
+ "token": self._encrypt_token(token),
+ "expires_at": token.get("expires_at"),
+ "created_at": current_time,
+ "updated_at": current_time,
+ }
+ )
+
+ db.add(result)
+ db.commit()
+ db.refresh(result)
+
+ if result:
+ result.token = token # Return decrypted token
+ return OAuthSessionModel.model_validate(result)
+ else:
+ return None
+ except Exception as e:
+ log.error(f"Error creating OAuth session: {e}")
+ return None
+
+ def get_session_by_id(self, session_id: str) -> Optional[OAuthSessionModel]:
+ """Get OAuth session by ID"""
+ try:
+ with get_db() as db:
+ session = db.query(OAuthSession).filter_by(id=session_id).first()
+ if session:
+ session.token = self._decrypt_token(session.token)
+ return OAuthSessionModel.model_validate(session)
+
+ return None
+ except Exception as e:
+ log.error(f"Error getting OAuth session by ID: {e}")
+ return None
+
+ def get_session_by_id_and_user_id(
+ self, session_id: str, user_id: str
+ ) -> Optional[OAuthSessionModel]:
+ """Get OAuth session by ID and user ID"""
+ try:
+ with get_db() as db:
+ session = (
+ db.query(OAuthSession)
+ .filter_by(id=session_id, user_id=user_id)
+ .first()
+ )
+ if session:
+ session.token = self._decrypt_token(session.token)
+ return OAuthSessionModel.model_validate(session)
+
+ return None
+ except Exception as e:
+            log.error(f"Error getting OAuth session by ID and user ID: {e}")
+ return None
+
+ def get_sessions_by_user_id(self, user_id: str) -> List[OAuthSessionModel]:
+ """Get all OAuth sessions for a user"""
+ try:
+ with get_db() as db:
+ sessions = db.query(OAuthSession).filter_by(user_id=user_id).all()
+
+ results = []
+ for session in sessions:
+ session.token = self._decrypt_token(session.token)
+ results.append(OAuthSessionModel.model_validate(session))
+
+ return results
+
+ except Exception as e:
+ log.error(f"Error getting OAuth sessions by user ID: {e}")
+ return []
+
+ def update_session_by_id(
+ self, session_id: str, token: dict
+ ) -> Optional[OAuthSessionModel]:
+ """Update OAuth session tokens"""
+ try:
+ with get_db() as db:
+ current_time = int(time.time())
+
+ db.query(OAuthSession).filter_by(id=session_id).update(
+ {
+ "token": self._encrypt_token(token),
+ "expires_at": token.get("expires_at"),
+ "updated_at": current_time,
+ }
+ )
+ db.commit()
+ session = db.query(OAuthSession).filter_by(id=session_id).first()
+
+ if session:
+ session.token = self._decrypt_token(session.token)
+ return OAuthSessionModel.model_validate(session)
+
+ return None
+ except Exception as e:
+ log.error(f"Error updating OAuth session tokens: {e}")
+ return None
+
+ def delete_session_by_id(self, session_id: str) -> bool:
+ """Delete an OAuth session"""
+ try:
+ with get_db() as db:
+ result = db.query(OAuthSession).filter_by(id=session_id).delete()
+ db.commit()
+ return result > 0
+ except Exception as e:
+ log.error(f"Error deleting OAuth session: {e}")
+ return False
+
+ def delete_sessions_by_user_id(self, user_id: str) -> bool:
+ """Delete all OAuth sessions for a user"""
+ try:
+ with get_db() as db:
+                db.query(OAuthSession).filter_by(user_id=user_id).delete()
+ db.commit()
+ return True
+ except Exception as e:
+ log.error(f"Error deleting OAuth sessions by user ID: {e}")
+ return False
+
+
+OAuthSessions = OAuthSessionTable()
diff --git a/backend/open_webui/models/prompts.py b/backend/open_webui/models/prompts.py
index 8ef4cd2bec..7502f34ccd 100644
--- a/backend/open_webui/models/prompts.py
+++ b/backend/open_webui/models/prompts.py
@@ -2,6 +2,7 @@ import time
from typing import Optional
from open_webui.internal.db import Base, get_db
+from open_webui.models.groups import Groups
from open_webui.models.users import Users, UserResponse
from pydantic import BaseModel, ConfigDict
@@ -103,10 +104,16 @@ class PromptsTable:
def get_prompts(self) -> list[PromptUserResponse]:
with get_db() as db:
- prompts = []
+ all_prompts = db.query(Prompt).order_by(Prompt.timestamp.desc()).all()
- for prompt in db.query(Prompt).order_by(Prompt.timestamp.desc()).all():
- user = Users.get_user_by_id(prompt.user_id)
+ user_ids = list(set(prompt.user_id for prompt in all_prompts))
+
+ users = Users.get_users_by_user_ids(user_ids) if user_ids else []
+ users_dict = {user.id: user for user in users}
+
+ prompts = []
+ for prompt in all_prompts:
+ user = users_dict.get(prompt.user_id)
prompts.append(
PromptUserResponse.model_validate(
{
@@ -122,12 +129,13 @@ class PromptsTable:
self, user_id: str, permission: str = "write"
) -> list[PromptUserResponse]:
prompts = self.get_prompts()
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
return [
prompt
for prompt in prompts
if prompt.user_id == user_id
- or has_access(user_id, permission, prompt.access_control)
+ or has_access(user_id, permission, prompt.access_control, user_group_ids)
]
def update_prompt_by_command(
diff --git a/backend/open_webui/models/tools.py b/backend/open_webui/models/tools.py
index 7f1409a900..3a47fa008d 100644
--- a/backend/open_webui/models/tools.py
+++ b/backend/open_webui/models/tools.py
@@ -4,6 +4,8 @@ from typing import Optional
from open_webui.internal.db import Base, JSONField, get_db
from open_webui.models.users import Users, UserResponse
+from open_webui.models.groups import Groups
+
from open_webui.env import SRC_LOG_LEVELS
from pydantic import BaseModel, ConfigDict
from sqlalchemy import BigInteger, Column, String, Text, JSON
@@ -144,9 +146,16 @@ class ToolsTable:
def get_tools(self) -> list[ToolUserModel]:
with get_db() as db:
+ all_tools = db.query(Tool).order_by(Tool.updated_at.desc()).all()
+
+ user_ids = list(set(tool.user_id for tool in all_tools))
+
+ users = Users.get_users_by_user_ids(user_ids) if user_ids else []
+ users_dict = {user.id: user for user in users}
+
tools = []
- for tool in db.query(Tool).order_by(Tool.updated_at.desc()).all():
- user = Users.get_user_by_id(tool.user_id)
+ for tool in all_tools:
+ user = users_dict.get(tool.user_id)
tools.append(
ToolUserModel.model_validate(
{
@@ -161,12 +170,13 @@ class ToolsTable:
self, user_id: str, permission: str = "write"
) -> list[ToolUserModel]:
tools = self.get_tools()
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user_id)}
return [
tool
for tool in tools
if tool.user_id == user_id
- or has_access(user_id, permission, tool.access_control)
+ or has_access(user_id, permission, tool.access_control, user_group_ids)
]
def get_tool_valves_by_id(self, id: str) -> Optional[dict]:
diff --git a/backend/open_webui/retrieval/loaders/datalab_marker.py b/backend/open_webui/retrieval/loaders/datalab_marker.py
index cc6c7ce79d..8d14be0a40 100644
--- a/backend/open_webui/retrieval/loaders/datalab_marker.py
+++ b/backend/open_webui/retrieval/loaders/datalab_marker.py
@@ -64,7 +64,7 @@ class DatalabMarkerLoader:
return mime_map.get(ext, "application/octet-stream")
def check_marker_request_status(self, request_id: str) -> dict:
- url = f"{self.api_base_url}/marker/{request_id}"
+ url = f"{self.api_base_url}/{request_id}"
headers = {"X-Api-Key": self.api_key}
try:
response = requests.get(url, headers=headers)
@@ -111,7 +111,7 @@ class DatalabMarkerLoader:
with open(self.file_path, "rb") as f:
files = {"file": (filename, f, mime_type)}
response = requests.post(
- f"{self.api_base_url}/marker",
+ f"{self.api_base_url}",
data=form_data,
files=files,
headers=headers,
diff --git a/backend/open_webui/retrieval/loaders/external_document.py b/backend/open_webui/retrieval/loaders/external_document.py
index c0ccd72432..1be2ca3f24 100644
--- a/backend/open_webui/retrieval/loaders/external_document.py
+++ b/backend/open_webui/retrieval/loaders/external_document.py
@@ -1,6 +1,7 @@
import requests
import logging, os
from typing import Iterator, List, Union
+from urllib.parse import quote
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
@@ -37,7 +38,7 @@ class ExternalDocumentLoader(BaseLoader):
headers["Authorization"] = f"Bearer {self.api_key}"
try:
- headers["X-Filename"] = os.path.basename(self.file_path)
+ headers["X-Filename"] = quote(os.path.basename(self.file_path))
except:
pass
diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py
index 241cd7dbe8..45f3d8c941 100644
--- a/backend/open_webui/retrieval/loaders/main.py
+++ b/backend/open_webui/retrieval/loaders/main.py
@@ -4,6 +4,7 @@ import ftfy
import sys
import json
+from azure.identity import DefaultAzureCredential
from langchain_community.document_loaders import (
AzureAIDocumentIntelligenceLoader,
BSHTMLLoader,
@@ -147,7 +148,7 @@ class DoclingLoader:
)
}
- params = {"image_export_mode": "placeholder", "table_mode": "accurate"}
+ params = {"image_export_mode": "placeholder"}
if self.params:
if self.params.get("do_picture_description"):
@@ -173,7 +174,15 @@ class DoclingLoader:
self.params.get("picture_description_api", {})
)
- if self.params.get("ocr_engine") and self.params.get("ocr_lang"):
+ params["do_ocr"] = self.params.get("do_ocr")
+
+ params["force_ocr"] = self.params.get("force_ocr")
+
+ if (
+ self.params.get("do_ocr")
+ and self.params.get("ocr_engine")
+ and self.params.get("ocr_lang")
+ ):
params["ocr_engine"] = self.params.get("ocr_engine")
params["ocr_lang"] = [
lang.strip()
@@ -181,6 +190,15 @@ class DoclingLoader:
if lang.strip()
]
+ if self.params.get("pdf_backend"):
+ params["pdf_backend"] = self.params.get("pdf_backend")
+
+ if self.params.get("table_mode"):
+ params["table_mode"] = self.params.get("table_mode")
+
+ if self.params.get("pipeline"):
+ params["pipeline"] = self.params.get("pipeline")
+
endpoint = f"{self.url}/v1/convert/file"
r = requests.post(endpoint, files=files, data=params)
@@ -283,7 +301,7 @@ class Loader:
):
api_base_url = self.kwargs.get("DATALAB_MARKER_API_BASE_URL", "")
if not api_base_url or api_base_url.strip() == "":
- api_base_url = "https://www.datalab.to/api/v1"
+ api_base_url = "https://www.datalab.to/api/v1/marker" # https://github.com/open-webui/open-webui/pull/16867#issuecomment-3218424349
loader = DatalabMarkerLoader(
file_path=file_path,
@@ -327,7 +345,6 @@ class Loader:
elif (
self.engine == "document_intelligence"
and self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT") != ""
- and self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY") != ""
and (
file_ext in ["pdf", "xls", "xlsx", "docx", "ppt", "pptx"]
or file_content_type
@@ -340,11 +357,18 @@ class Loader:
]
)
):
- loader = AzureAIDocumentIntelligenceLoader(
- file_path=file_path,
- api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"),
- api_key=self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY"),
- )
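+            # Use the configured API key when present; otherwise authenticate with DefaultAzureCredential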
+ if self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY") != "":
+ loader = AzureAIDocumentIntelligenceLoader(
+ file_path=file_path,
+ api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"),
+ api_key=self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY"),
+ )
+ else:
+ loader = AzureAIDocumentIntelligenceLoader(
+ file_path=file_path,
+ api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"),
+ azure_credential=DefaultAzureCredential(),
+ )
elif (
self.engine == "mistral_ocr"
and self.kwargs.get("MISTRAL_OCR_API_KEY") != ""
diff --git a/backend/open_webui/retrieval/loaders/youtube.py b/backend/open_webui/retrieval/loaders/youtube.py
index be5e533588..360ef0a6c7 100644
--- a/backend/open_webui/retrieval/loaders/youtube.py
+++ b/backend/open_webui/retrieval/loaders/youtube.py
@@ -98,10 +98,9 @@ class YoutubeLoader:
else:
youtube_proxies = None
+ transcript_api = YouTubeTranscriptApi(proxy_config=youtube_proxies)
try:
- transcript_list = YouTubeTranscriptApi.list_transcripts(
- self.video_id, proxies=youtube_proxies
- )
+ transcript_list = transcript_api.list(self.video_id)
except Exception as e:
log.exception("Loading YouTube transcript failed")
return []
diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py
index 862972187d..dead8458cb 100644
--- a/backend/open_webui/retrieval/utils.py
+++ b/backend/open_webui/retrieval/utils.py
@@ -124,14 +124,17 @@ def query_doc_with_hybrid_search(
hybrid_bm25_weight: float,
) -> dict:
try:
- # BM_25 required only if weight is greater than 0
- if hybrid_bm25_weight > 0:
- log.debug(f"query_doc_with_hybrid_search:doc {collection_name}")
- bm25_retriever = BM25Retriever.from_texts(
- texts=collection_result.documents[0],
- metadatas=collection_result.metadatas[0],
- )
- bm25_retriever.k = k
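+        # Nothing to search against: return an empty result instead of building retrievers over an empty collection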
+ if not collection_result.documents[0]:
+ log.warning(f"query_doc_with_hybrid_search:no_docs {collection_name}")
+ return {"documents": [], "metadatas": [], "distances": []}
+
+ log.debug(f"query_doc_with_hybrid_search:doc {collection_name}")
+
+ bm25_retriever = BM25Retriever.from_texts(
+ texts=collection_result.documents[0],
+ metadatas=collection_result.metadatas[0],
+ )
+ bm25_retriever.k = k
vector_search_retriever = VectorSearchRetriever(
collection_name=collection_name,
@@ -339,22 +342,18 @@ def query_collection_with_hybrid_search(
# Fetch collection data once per collection sequentially
# Avoid fetching the same data multiple times later
collection_results = {}
- # Only retrieve entire collection if bm_25 calculation is required
- if hybrid_bm25_weight > 0:
- for collection_name in collection_names:
- try:
- log.debug(
- f"query_collection_with_hybrid_search:VECTOR_DB_CLIENT.get:collection {collection_name}"
- )
- collection_results[collection_name] = VECTOR_DB_CLIENT.get(
- collection_name=collection_name
- )
- except Exception as e:
- log.exception(f"Failed to fetch collection {collection_name}: {e}")
- collection_results[collection_name] = None
- else:
- for collection_name in collection_names:
- collection_results[collection_name] = []
+ for collection_name in collection_names:
+ try:
+ log.debug(
+ f"query_collection_with_hybrid_search:VECTOR_DB_CLIENT.get:collection {collection_name}"
+ )
+ collection_results[collection_name] = VECTOR_DB_CLIENT.get(
+ collection_name=collection_name
+ )
+ except Exception as e:
+ log.exception(f"Failed to fetch collection {collection_name}: {e}")
+ collection_results[collection_name] = None
+
log.info(
f"Starting hybrid search for {len(queries)} queries in {len(collection_names)} collections..."
)
@@ -489,17 +488,18 @@ def get_sources_from_items(
if item.get("type") == "text":
# Raw Text
- # Used during temporary chat file uploads
+            # Used during temporary chat file uploads or web page & YouTube attachments
- if item.get("file"):
+ if item.get("collection_name"):
+ # If item has a collection name, use it
+ collection_names.append(item.get("collection_name"))
+ elif item.get("file"):
# if item has file data, use it
query_result = {
"documents": [
[item.get("file", {}).get("data", {}).get("content")]
],
- "metadatas": [
- [item.get("file", {}).get("data", {}).get("meta", {})]
- ],
+ "metadatas": [[item.get("file", {}).get("meta", {})]],
}
else:
# Fallback to item content
diff --git a/backend/open_webui/retrieval/vector/dbs/pgvector.py b/backend/open_webui/retrieval/vector/dbs/pgvector.py
index d978f0c824..06c1698cdd 100644
--- a/backend/open_webui/retrieval/vector/dbs/pgvector.py
+++ b/backend/open_webui/retrieval/vector/dbs/pgvector.py
@@ -37,6 +37,7 @@ from open_webui.retrieval.vector.main import (
from open_webui.config import (
PGVECTOR_DB_URL,
PGVECTOR_INITIALIZE_MAX_VECTOR_LENGTH,
+ PGVECTOR_CREATE_EXTENSION,
PGVECTOR_PGCRYPTO,
PGVECTOR_PGCRYPTO_KEY,
PGVECTOR_POOL_SIZE,
@@ -112,18 +113,19 @@ class PgvectorClient(VectorDBBase):
try:
# Ensure the pgvector extension is available
# Use a conditional check to avoid permission issues on Azure PostgreSQL
- self.session.execute(
- text(
- """
- DO $$
- BEGIN
- IF NOT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'vector') THEN
- CREATE EXTENSION IF NOT EXISTS vector;
- END IF;
- END $$;
- """
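+            # PGVECTOR_CREATE_EXTENSION lets deployments skip this block when the database role cannot create extensions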
+ if PGVECTOR_CREATE_EXTENSION:
+ self.session.execute(
+ text(
+ """
+ DO $$
+ BEGIN
+ IF NOT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'vector') THEN
+ CREATE EXTENSION IF NOT EXISTS vector;
+ END IF;
+ END $$;
+ """
+ )
)
- )
if PGVECTOR_PGCRYPTO:
# Ensure the pgcrypto extension is available for encryption
diff --git a/backend/open_webui/retrieval/web/utils.py b/backend/open_webui/retrieval/web/utils.py
index 5a90a86e0f..5ba27ee8f0 100644
--- a/backend/open_webui/retrieval/web/utils.py
+++ b/backend/open_webui/retrieval/web/utils.py
@@ -517,6 +517,7 @@ class SafeWebBaseLoader(WebBaseLoader):
async with session.get(
url,
**(self.requests_kwargs | kwargs),
+ allow_redirects=False,
) as response:
if self.raise_for_status:
response.raise_for_status()
@@ -614,7 +615,7 @@ def get_web_loader(
WebLoaderClass = SafeWebBaseLoader
if WEB_LOADER_ENGINE.value == "playwright":
WebLoaderClass = SafePlaywrightURLLoader
- web_loader_args["playwright_timeout"] = PLAYWRIGHT_TIMEOUT.value * 1000
+ web_loader_args["playwright_timeout"] = PLAYWRIGHT_TIMEOUT.value
if PLAYWRIGHT_WS_URL.value:
web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URL.value
diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py
index cc5711569d..4d50ee9e7e 100644
--- a/backend/open_webui/routers/audio.py
+++ b/backend/open_webui/routers/audio.py
@@ -4,7 +4,6 @@ import logging
import os
import uuid
from functools import lru_cache
-from pathlib import Path
from pydub import AudioSegment
from pydub.silence import split_on_silence
from concurrent.futures import ThreadPoolExecutor
@@ -15,7 +14,7 @@ import aiohttp
import aiofiles
import requests
import mimetypes
-from urllib.parse import quote
+from urllib.parse import urljoin, quote
from fastapi import (
Depends,
@@ -338,7 +337,10 @@ async def speech(request: Request, user=Depends(get_verified_user)):
timeout=timeout, trust_env=True
) as session:
r = await session.post(
- url=f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech",
+ url=urljoin(
+ request.app.state.config.TTS_OPENAI_API_BASE_URL,
+ "/audio/speech",
+ ),
json=payload,
headers={
"Content-Type": "application/json",
@@ -466,8 +468,10 @@ async def speech(request: Request, user=Depends(get_verified_user)):
timeout=timeout, trust_env=True
) as session:
async with session.post(
- (base_url or f"https://{region}.tts.speech.microsoft.com")
- + "/cognitiveservices/v1",
+ urljoin(
+ base_url or f"https://{region}.tts.speech.microsoft.com",
+ "/cognitiveservices/v1",
+ ),
headers={
"Ocp-Apim-Subscription-Key": request.app.state.config.TTS_API_KEY,
"Content-Type": "application/ssml+xml",
diff --git a/backend/open_webui/routers/auths.py b/backend/open_webui/routers/auths.py
index 11254ec78c..e3271250c1 100644
--- a/backend/open_webui/routers/auths.py
+++ b/backend/open_webui/routers/auths.py
@@ -19,6 +19,7 @@ from open_webui.models.auths import (
)
from open_webui.models.users import Users, UpdateProfileForm
from open_webui.models.groups import Groups
+from open_webui.models.oauth_sessions import OAuthSessions
from open_webui.constants import ERROR_MESSAGES, WEBHOOK_MESSAGES
from open_webui.env import (
@@ -29,6 +30,7 @@ from open_webui.env import (
WEBUI_AUTH_COOKIE_SAME_SITE,
WEBUI_AUTH_COOKIE_SECURE,
WEBUI_AUTH_SIGNOUT_REDIRECT_URL,
+ ENABLE_INITIAL_ADMIN_SIGNUP,
SRC_LOG_LEVELS,
)
from fastapi import APIRouter, Depends, HTTPException, Request, status
@@ -569,9 +571,10 @@ async def signup(request: Request, response: Response, form_data: SignupForm):
not request.app.state.config.ENABLE_SIGNUP
or not request.app.state.config.ENABLE_LOGIN_FORM
):
- raise HTTPException(
- status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED
- )
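+        # Allow the very first account (the initial admin) to sign up even when signup or the
+        # login form is disabled, provided ENABLE_INITIAL_ADMIN_SIGNUP is set and no users exist yet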
+ if has_users or not ENABLE_INITIAL_ADMIN_SIGNUP:
+ raise HTTPException(
+ status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED
+ )
else:
if has_users:
raise HTTPException(
@@ -674,19 +677,29 @@ async def signup(request: Request, response: Response, form_data: SignupForm):
async def signout(request: Request, response: Response):
response.delete_cookie("token")
response.delete_cookie("oui-session")
+ response.delete_cookie("oauth_id_token")
- if ENABLE_OAUTH_SIGNUP.value:
- oauth_id_token = request.cookies.get("oauth_id_token")
- if oauth_id_token and OPENID_PROVIDER_URL.value:
+ oauth_session_id = request.cookies.get("oauth_session_id")
+ if oauth_session_id:
+ response.delete_cookie("oauth_session_id")
+
+ session = OAuthSessions.get_session_by_id(oauth_session_id)
+ oauth_server_metadata_url = (
+ request.app.state.oauth_manager.get_server_metadata_url(session.provider)
+ if session
+ else None
+ ) or OPENID_PROVIDER_URL.value
+
+ if session and oauth_server_metadata_url:
+ oauth_id_token = session.token.get("id_token")
try:
async with ClientSession(trust_env=True) as session:
- async with session.get(OPENID_PROVIDER_URL.value) as resp:
- if resp.status == 200:
- openid_data = await resp.json()
+ async with session.get(oauth_server_metadata_url) as r:
+ if r.status == 200:
+ openid_data = await r.json()
logout_url = openid_data.get("end_session_endpoint")
- if logout_url:
- response.delete_cookie("oauth_id_token")
+ if logout_url:
return JSONResponse(
status_code=200,
content={
@@ -701,15 +714,14 @@ async def signout(request: Request, response: Response):
headers=response.headers,
)
else:
- raise HTTPException(
- status_code=resp.status,
- detail="Failed to fetch OpenID configuration",
- )
+ raise Exception("Failed to fetch OpenID configuration")
+
except Exception as e:
log.error(f"OpenID signout error: {str(e)}")
raise HTTPException(
status_code=500,
detail="Failed to sign out from the OpenID provider.",
+ headers=response.headers,
)
if WEBUI_AUTH_SIGNOUT_REDIRECT_URL:
diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py
index 3b46d0bd8a..778fbdec27 100644
--- a/backend/open_webui/routers/files.py
+++ b/backend/open_webui/routers/files.py
@@ -143,9 +143,18 @@ def upload_file(
file: UploadFile = File(...),
metadata: Optional[dict | str] = Form(None),
process: bool = Query(True),
+ process_in_background: bool = Query(True),
user=Depends(get_verified_user),
):
- return upload_file_handler(request, file, metadata, process, user, background_tasks)
+ return upload_file_handler(
+ request,
+ file=file,
+ metadata=metadata,
+ process=process,
+ process_in_background=process_in_background,
+ user=user,
+ background_tasks=background_tasks,
+ )
def upload_file_handler(
@@ -153,6 +162,7 @@ def upload_file_handler(
file: UploadFile = File(...),
metadata: Optional[dict | str] = Form(None),
process: bool = Query(True),
+ process_in_background: bool = Query(True),
user=Depends(get_verified_user),
background_tasks: Optional[BackgroundTasks] = None,
):
@@ -225,7 +235,7 @@ def upload_file_handler(
)
if process:
- if background_tasks:
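+        # Defer processing to a background task only when the caller allows it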
+ if background_tasks and process_in_background:
background_tasks.add_task(
process_uploaded_file,
request,
@@ -401,25 +411,28 @@ async def get_file_process_status(
MAX_FILE_PROCESSING_DURATION = 3600 * 2
async def event_stream(file_item):
- for _ in range(MAX_FILE_PROCESSING_DURATION):
- file_item = Files.get_file_by_id(file_item.id)
- if file_item:
- data = file_item.model_dump().get("data", {})
- status = data.get("status")
+ if file_item:
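+            # Poll the file record every 0.5 s and stream status events until processing completes or fails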
+ for _ in range(MAX_FILE_PROCESSING_DURATION):
+ file_item = Files.get_file_by_id(file_item.id)
+ if file_item:
+ data = file_item.model_dump().get("data", {})
+ status = data.get("status")
- if status:
- event = {"status": status}
- if status == "failed":
- event["error"] = data.get("error")
+ if status:
+ event = {"status": status}
+ if status == "failed":
+ event["error"] = data.get("error")
- yield f"data: {json.dumps(event)}\n\n"
- if status in ("completed", "failed"):
+ yield f"data: {json.dumps(event)}\n\n"
+ if status in ("completed", "failed"):
+ break
+ else:
+ # Legacy
break
- else:
- # Legacy
- break
- await asyncio.sleep(0.5)
+ await asyncio.sleep(0.5)
+ else:
+ yield f"data: {json.dumps({'status': 'not_found'})}\n\n"
return StreamingResponse(
event_stream(file),
diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py
index e419989e46..36dbfee5c5 100644
--- a/backend/open_webui/routers/folders.py
+++ b/backend/open_webui/routers/folders.py
@@ -10,10 +10,14 @@ import mimetypes
from open_webui.models.folders import (
FolderForm,
+ FolderUpdateForm,
FolderModel,
Folders,
)
from open_webui.models.chats import Chats
+from open_webui.models.files import Files
+from open_webui.models.knowledge import Knowledges
+
from open_webui.config import UPLOAD_DIR
from open_webui.env import SRC_LOG_LEVELS
@@ -44,6 +48,31 @@ router = APIRouter()
async def get_folders(user=Depends(get_verified_user)):
folders = Folders.get_folders_by_user_id(user.id)
+ # Verify folder data integrity
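+    # Drop references to files or knowledge collections the user can no longer read, then persist the cleaned list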
+ for folder in folders:
+ if folder.data:
+ if "files" in folder.data:
+ valid_files = []
+ for file in folder.data["files"]:
+
+ if file.get("type") == "file":
+ if Files.check_access_by_user_id(
+ file.get("id"), user.id, "read"
+ ):
+ valid_files.append(file)
+ elif file.get("type") == "collection":
+ if Knowledges.check_access_by_user_id(
+ file.get("id"), user.id, "read"
+ ):
+ valid_files.append(file)
+ else:
+ valid_files.append(file)
+
+ folder.data["files"] = valid_files
+ Folders.update_folder_by_id_and_user_id(
+ folder.id, user.id, FolderUpdateForm(data=folder.data)
+ )
+
return [
{
**folder.model_dump(),
@@ -113,22 +142,24 @@ async def get_folder_by_id(id: str, user=Depends(get_verified_user)):
@router.post("/{id}/update")
async def update_folder_name_by_id(
- id: str, form_data: FolderForm, user=Depends(get_verified_user)
+ id: str, form_data: FolderUpdateForm, user=Depends(get_verified_user)
):
folder = Folders.get_folder_by_id_and_user_id(id, user.id)
if folder:
- existing_folder = Folders.get_folder_by_parent_id_and_user_id_and_name(
- folder.parent_id, user.id, form_data.name
- )
- if existing_folder and existing_folder.id != id:
- raise HTTPException(
- status_code=status.HTTP_400_BAD_REQUEST,
- detail=ERROR_MESSAGES.DEFAULT("Folder already exists"),
+
+ if form_data.name is not None:
+            # Check if a folder with the same name already exists
+ existing_folder = Folders.get_folder_by_parent_id_and_user_id_and_name(
+ folder.parent_id, user.id, form_data.name
)
+ if existing_folder and existing_folder.id != id:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=ERROR_MESSAGES.DEFAULT("Folder already exists"),
+ )
try:
folder = Folders.update_folder_by_id_and_user_id(id, user.id, form_data)
-
return folder
except Exception as e:
log.exception(e)
diff --git a/backend/open_webui/routers/functions.py b/backend/open_webui/routers/functions.py
index b5beb96cf0..9ef6915709 100644
--- a/backend/open_webui/routers/functions.py
+++ b/backend/open_webui/routers/functions.py
@@ -10,6 +10,7 @@ from open_webui.models.functions import (
FunctionForm,
FunctionModel,
FunctionResponse,
+ FunctionWithValvesModel,
Functions,
)
from open_webui.utils.plugin import (
@@ -46,9 +47,9 @@ async def get_functions(user=Depends(get_verified_user)):
############################
-@router.get("/export", response_model=list[FunctionModel])
-async def get_functions(user=Depends(get_admin_user)):
- return Functions.get_functions()
+@router.get("/export", response_model=list[FunctionModel | FunctionWithValvesModel])
+async def get_functions(include_valves: bool = False, user=Depends(get_admin_user)):
+ return Functions.get_functions(include_valves=include_valves)
############################
@@ -132,10 +133,10 @@ async def load_function_from_url(
class SyncFunctionsForm(BaseModel):
- functions: list[FunctionModel] = []
+ functions: list[FunctionWithValvesModel] = []
-@router.post("/sync", response_model=list[FunctionModel])
+@router.post("/sync", response_model=list[FunctionWithValvesModel])
async def sync_functions(
request: Request, form_data: SyncFunctionsForm, user=Depends(get_admin_user)
):
diff --git a/backend/open_webui/routers/images.py b/backend/open_webui/routers/images.py
index 9311cb6e2c..802a3e9924 100644
--- a/backend/open_webui/routers/images.py
+++ b/backend/open_webui/routers/images.py
@@ -48,6 +48,7 @@ async def get_config(request: Request, user=Depends(get_admin_user)):
"prompt_generation": request.app.state.config.ENABLE_IMAGE_PROMPT_GENERATION,
"openai": {
"OPENAI_API_BASE_URL": request.app.state.config.IMAGES_OPENAI_API_BASE_URL,
+ "OPENAI_API_VERSION": request.app.state.config.IMAGES_OPENAI_API_VERSION,
"OPENAI_API_KEY": request.app.state.config.IMAGES_OPENAI_API_KEY,
},
"automatic1111": {
@@ -72,6 +73,7 @@ async def get_config(request: Request, user=Depends(get_admin_user)):
class OpenAIConfigForm(BaseModel):
OPENAI_API_BASE_URL: str
+ OPENAI_API_VERSION: str
OPENAI_API_KEY: str
@@ -119,6 +121,9 @@ async def update_config(
request.app.state.config.IMAGES_OPENAI_API_BASE_URL = (
form_data.openai.OPENAI_API_BASE_URL
)
+ request.app.state.config.IMAGES_OPENAI_API_VERSION = (
+ form_data.openai.OPENAI_API_VERSION
+ )
request.app.state.config.IMAGES_OPENAI_API_KEY = form_data.openai.OPENAI_API_KEY
request.app.state.config.IMAGES_GEMINI_API_BASE_URL = (
@@ -165,6 +170,7 @@ async def update_config(
"prompt_generation": request.app.state.config.ENABLE_IMAGE_PROMPT_GENERATION,
"openai": {
"OPENAI_API_BASE_URL": request.app.state.config.IMAGES_OPENAI_API_BASE_URL,
+ "OPENAI_API_VERSION": request.app.state.config.IMAGES_OPENAI_API_VERSION,
"OPENAI_API_KEY": request.app.state.config.IMAGES_OPENAI_API_KEY,
},
"automatic1111": {
@@ -544,10 +550,16 @@ async def image_generations(
),
}
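+        # If an API version is configured (e.g. for Azure OpenAI), append it as an api-version query parameter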
+ api_version_query_param = ""
+ if request.app.state.config.IMAGES_OPENAI_API_VERSION:
+ api_version_query_param = (
+ f"?api-version={request.app.state.config.IMAGES_OPENAI_API_VERSION}"
+ )
+
# Use asyncio.to_thread for the requests.post call
r = await asyncio.to_thread(
requests.post,
- url=f"{request.app.state.config.IMAGES_OPENAI_API_BASE_URL}/images/generations",
+ url=f"{request.app.state.config.IMAGES_OPENAI_API_BASE_URL}/images/generations{api_version_query_param}",
json=data,
headers=headers,
)
diff --git a/backend/open_webui/routers/knowledge.py b/backend/open_webui/routers/knowledge.py
index e9ba9c39ad..71722d706e 100644
--- a/backend/open_webui/routers/knowledge.py
+++ b/backend/open_webui/routers/knowledge.py
@@ -1,6 +1,6 @@
from typing import List, Optional
from pydantic import BaseModel
-from fastapi import APIRouter, Depends, HTTPException, status, Request
+from fastapi import APIRouter, Depends, HTTPException, status, Request, Query
import logging
from open_webui.models.knowledge import (
@@ -151,6 +151,18 @@ async def create_new_knowledge(
detail=ERROR_MESSAGES.UNAUTHORIZED,
)
+ # Check if user can share publicly
+ if (
+ user.role != "admin"
+ and form_data.access_control == None
+ and not has_permission(
+ user.id,
+ "sharing.public_knowledge",
+ request.app.state.config.USER_PERMISSIONS,
+ )
+ ):
+ form_data.access_control = {}
+
knowledge = Knowledges.insert_new_knowledge(user.id, form_data)
if knowledge:
@@ -285,6 +297,7 @@ async def get_knowledge_by_id(id: str, user=Depends(get_verified_user)):
@router.post("/{id}/update", response_model=Optional[KnowledgeFilesResponse])
async def update_knowledge_by_id(
+ request: Request,
id: str,
form_data: KnowledgeForm,
user=Depends(get_verified_user),
@@ -306,10 +319,22 @@ async def update_knowledge_by_id(
detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
)
+ # Check if user can share publicly
+ if (
+ user.role != "admin"
+ and form_data.access_control == None
+ and not has_permission(
+ user.id,
+ "sharing.public_knowledge",
+ request.app.state.config.USER_PERMISSIONS,
+ )
+ ):
+ form_data.access_control = {}
+
knowledge = Knowledges.update_knowledge_by_id(id=id, form_data=form_data)
if knowledge:
file_ids = knowledge.data.get("file_ids", []) if knowledge.data else []
- files = Files.get_files_by_ids(file_ids)
+ files = Files.get_file_metadatas_by_ids(file_ids)
return KnowledgeFilesResponse(
**knowledge.model_dump(),
@@ -492,6 +517,7 @@ def update_file_from_knowledge_by_id(
def remove_file_from_knowledge_by_id(
id: str,
form_data: KnowledgeFileIdForm,
+ delete_file: bool = Query(True),
user=Depends(get_verified_user),
):
knowledge = Knowledges.get_knowledge_by_id(id=id)
@@ -528,18 +554,19 @@ def remove_file_from_knowledge_by_id(
log.debug(e)
pass
- try:
- # Remove the file's collection from vector database
- file_collection = f"file-{form_data.file_id}"
- if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection):
- VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection)
- except Exception as e:
- log.debug("This was most likely caused by bypassing embedding processing")
- log.debug(e)
- pass
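+        # Only purge the file's vector collection and database record when full deletion is requested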
+ if delete_file:
+ try:
+ # Remove the file's collection from vector database
+ file_collection = f"file-{form_data.file_id}"
+ if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection):
+ VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection)
+ except Exception as e:
+ log.debug("This was most likely caused by bypassing embedding processing")
+ log.debug(e)
+ pass
- # Delete file from database
- Files.delete_file_by_id(form_data.file_id)
+ # Delete file from database
+ Files.delete_file_by_id(form_data.file_id)
if knowledge:
data = knowledge.data or {}
diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py
index 11bf5b914f..8dadf3523a 100644
--- a/backend/open_webui/routers/ollama.py
+++ b/backend/open_webui/routers/ollama.py
@@ -329,17 +329,21 @@ def merge_ollama_models_lists(model_lists):
for idx, model_list in enumerate(model_lists):
if model_list is not None:
for model in model_list:
- id = model["model"]
- if id not in merged_models:
- model["urls"] = [idx]
- merged_models[id] = model
- else:
- merged_models[id]["urls"].append(idx)
+ id = model.get("model")
+ if id is not None:
+ if id not in merged_models:
+ model["urls"] = [idx]
+ merged_models[id] = model
+ else:
+ merged_models[id]["urls"].append(idx)
return list(merged_models.values())
-@cached(ttl=MODELS_CACHE_TTL)
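+# Key the cached model list per user so user-specific model lists are not shared across users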
+@cached(
+ ttl=MODELS_CACHE_TTL,
+ key=lambda _, user: f"ollama_all_models_{user.id}" if user else "ollama_all_models",
+)
async def get_all_models(request: Request, user: UserModel = None):
log.info("get_all_models()")
if request.app.state.config.ENABLE_OLLAMA_API:
diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 7ba0c5f68a..184f47038d 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -119,6 +119,74 @@ def openai_reasoning_model_handler(payload):
return payload
+def get_headers_and_cookies(
+ request: Request,
+ url,
+ key=None,
+ config=None,
+ metadata: Optional[dict] = None,
+ user: UserModel = None,
+):
+ cookies = {}
+ headers = {
+ "Content-Type": "application/json",
+ **(
+ {
+ "HTTP-Referer": "https://openwebui.com/",
+ "X-Title": "Open WebUI",
+ }
+ if "openrouter.ai" in url
+ else {}
+ ),
+ **(
+ {
+ "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
+ "X-OpenWebUI-User-Id": user.id,
+ "X-OpenWebUI-User-Email": user.email,
+ "X-OpenWebUI-User-Role": user.role,
+ **(
+ {"X-OpenWebUI-Chat-Id": metadata.get("chat_id")}
+ if metadata and metadata.get("chat_id")
+ else {}
+ ),
+ }
+ if ENABLE_FORWARD_USER_INFO_HEADERS
+ else {}
+ ),
+ }
+
+ token = None
+    auth_type = (config or {}).get("auth_type")  # config may be None (its default), so guard the lookup
+
+ if auth_type == "bearer" or auth_type is None:
+ # Default to bearer if not specified
+ token = f"{key}"
+ elif auth_type == "none":
+ token = None
+ elif auth_type == "session":
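+        # Forward the caller's own cookies and bearer token to the upstream connection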
+ cookies = request.cookies
+ token = request.state.token.credentials
+ elif auth_type == "system_oauth":
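+        # Forward cookies and use the access token from the user's stored OAuth session as the bearer token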
+ cookies = request.cookies
+
+ oauth_token = None
+ try:
+ oauth_token = request.app.state.oauth_manager.get_oauth_token(
+ user.id,
+ request.cookies.get("oauth_session_id", None),
+ )
+ except Exception as e:
+ log.error(f"Error getting OAuth token: {e}")
+
+ if oauth_token:
+ token = f"{oauth_token.get('access_token', '')}"
+
+ if token:
+ headers["Authorization"] = f"Bearer {token}"
+
+ return headers, cookies
+
+
##########################################
#
# API routes
@@ -210,34 +278,23 @@ async def speech(request: Request, user=Depends(get_verified_user)):
return FileResponse(file_path)
url = request.app.state.config.OPENAI_API_BASE_URLS[idx]
+ key = request.app.state.config.OPENAI_API_KEYS[idx]
+ api_config = request.app.state.config.OPENAI_API_CONFIGS.get(
+ str(idx),
+ request.app.state.config.OPENAI_API_CONFIGS.get(url, {}), # Legacy support
+ )
+
+ headers, cookies = get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
r = None
try:
r = requests.post(
url=f"{url}/audio/speech",
data=body,
- headers={
- "Content-Type": "application/json",
- "Authorization": f"Bearer {request.app.state.config.OPENAI_API_KEYS[idx]}",
- **(
- {
- "HTTP-Referer": "https://openwebui.com/",
- "X-Title": "Open WebUI",
- }
- if "openrouter.ai" in url
- else {}
- ),
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS
- else {}
- ),
- },
+ headers=headers,
+ cookies=cookies,
stream=True,
)
@@ -401,7 +458,10 @@ async def get_filtered_models(models, user):
return filtered_models
-@cached(ttl=MODELS_CACHE_TTL)
+@cached(
+ ttl=MODELS_CACHE_TTL,
+ key=lambda _, user: f"openai_all_models_{user.id}" if user else "openai_all_models",
+)
async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
log.info("get_all_models()")
@@ -489,19 +549,9 @@ async def get_models(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers = {
- "Content-Type": "application/json",
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS
- else {}
- ),
- }
+ headers, cookies = get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
if api_config.get("azure", False):
models = {
@@ -509,11 +559,10 @@ async def get_models(
"object": "list",
}
else:
- headers["Authorization"] = f"Bearer {key}"
-
async with session.get(
f"{url}/models",
headers=headers,
+ cookies=cookies,
ssl=AIOHTTP_CLIENT_SESSION_SSL,
) as r:
if r.status != 200:
@@ -572,7 +621,9 @@ class ConnectionVerificationForm(BaseModel):
@router.post("/verify")
async def verify_connection(
- form_data: ConnectionVerificationForm, user=Depends(get_admin_user)
+ request: Request,
+ form_data: ConnectionVerificationForm,
+ user=Depends(get_admin_user),
):
url = form_data.url
key = form_data.key
@@ -584,19 +635,9 @@ async def verify_connection(
timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST),
) as session:
try:
- headers = {
- "Content-Type": "application/json",
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS
- else {}
- ),
- }
+ headers, cookies = get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
if api_config.get("azure", False):
headers["api-key"] = key
@@ -605,6 +646,7 @@ async def verify_connection(
async with session.get(
url=f"{url}/openai/models?api-version={api_version}",
headers=headers,
+ cookies=cookies,
ssl=AIOHTTP_CLIENT_SESSION_SSL,
) as r:
try:
@@ -624,11 +666,10 @@ async def verify_connection(
return response_data
else:
- headers["Authorization"] = f"Bearer {key}"
-
async with session.get(
f"{url}/models",
headers=headers,
+ cookies=cookies,
ssl=AIOHTTP_CLIENT_SESSION_SSL,
) as r:
try:
@@ -836,32 +877,9 @@ async def generate_chat_completion(
convert_logit_bias_input_to_json(payload["logit_bias"])
)
- headers = {
- "Content-Type": "application/json",
- **(
- {
- "HTTP-Referer": "https://openwebui.com/",
- "X-Title": "Open WebUI",
- }
- if "openrouter.ai" in url
- else {}
- ),
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- **(
- {"X-OpenWebUI-Chat-Id": metadata.get("chat_id")}
- if metadata and metadata.get("chat_id")
- else {}
- ),
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS
- else {}
- ),
- }
+ headers, cookies = get_headers_and_cookies(
+ request, url, key, api_config, metadata, user=user
+ )
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
@@ -871,7 +889,6 @@ async def generate_chat_completion(
request_url = f"{request_url}/chat/completions?api-version={api_version}"
else:
request_url = f"{url}/chat/completions"
- headers["Authorization"] = f"Bearer {key}"
payload = json.dumps(payload)
@@ -890,6 +907,7 @@ async def generate_chat_completion(
url=request_url,
data=payload,
headers=headers,
+ cookies=cookies,
ssl=AIOHTTP_CLIENT_SESSION_SSL,
)
@@ -951,31 +969,27 @@ async def embeddings(request: Request, form_data: dict, user):
models = request.app.state.OPENAI_MODELS
if model_id in models:
idx = models[model_id]["urlIdx"]
+
url = request.app.state.config.OPENAI_API_BASE_URLS[idx]
key = request.app.state.config.OPENAI_API_KEYS[idx]
+ api_config = request.app.state.config.OPENAI_API_CONFIGS.get(
+ str(idx),
+ request.app.state.config.OPENAI_API_CONFIGS.get(url, {}), # Legacy support
+ )
+
r = None
session = None
streaming = False
+
+ headers, cookies = get_headers_and_cookies(request, url, key, api_config, user=user)
try:
session = aiohttp.ClientSession(trust_env=True)
r = await session.request(
method="POST",
url=f"{url}/embeddings",
data=body,
- headers={
- "Authorization": f"Bearer {key}",
- "Content-Type": "application/json",
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS and user
- else {}
- ),
- },
+ headers=headers,
+ cookies=cookies,
)
if "text/event-stream" in r.headers.get("Content-Type", ""):
@@ -1037,19 +1051,9 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
streaming = False
try:
- headers = {
- "Content-Type": "application/json",
- **(
- {
- "X-OpenWebUI-User-Name": quote(user.name, safe=" "),
- "X-OpenWebUI-User-Id": user.id,
- "X-OpenWebUI-User-Email": user.email,
- "X-OpenWebUI-User-Role": user.role,
- }
- if ENABLE_FORWARD_USER_INFO_HEADERS
- else {}
- ),
- }
+ headers, cookies = get_headers_and_cookies(
+ request, url, key, api_config, user=user
+ )
if api_config.get("azure", False):
api_version = api_config.get("api_version", "2023-03-15-preview")
@@ -1062,7 +1066,6 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
request_url = f"{url}/{path}?api-version={api_version}"
else:
- headers["Authorization"] = f"Bearer {key}"
request_url = f"{url}/{path}"
session = aiohttp.ClientSession(trust_env=True)
@@ -1071,6 +1074,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
url=request_url,
data=body,
headers=headers,
+ cookies=cookies,
ssl=AIOHTTP_CLIENT_SESSION_SSL,
)
diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py
index 738f2d05fc..dd5e2d5bc4 100644
--- a/backend/open_webui/routers/retrieval.py
+++ b/backend/open_webui/routers/retrieval.py
@@ -426,8 +426,13 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
"EXTERNAL_DOCUMENT_LOADER_API_KEY": request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY,
"TIKA_SERVER_URL": request.app.state.config.TIKA_SERVER_URL,
"DOCLING_SERVER_URL": request.app.state.config.DOCLING_SERVER_URL,
+ "DOCLING_DO_OCR": request.app.state.config.DOCLING_DO_OCR,
+ "DOCLING_FORCE_OCR": request.app.state.config.DOCLING_FORCE_OCR,
"DOCLING_OCR_ENGINE": request.app.state.config.DOCLING_OCR_ENGINE,
"DOCLING_OCR_LANG": request.app.state.config.DOCLING_OCR_LANG,
+ "DOCLING_PDF_BACKEND": request.app.state.config.DOCLING_PDF_BACKEND,
+ "DOCLING_TABLE_MODE": request.app.state.config.DOCLING_TABLE_MODE,
+ "DOCLING_PIPELINE": request.app.state.config.DOCLING_PIPELINE,
"DOCLING_DO_PICTURE_DESCRIPTION": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION,
"DOCLING_PICTURE_DESCRIPTION_MODE": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE,
"DOCLING_PICTURE_DESCRIPTION_LOCAL": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL,
@@ -596,8 +601,13 @@ class ConfigForm(BaseModel):
TIKA_SERVER_URL: Optional[str] = None
DOCLING_SERVER_URL: Optional[str] = None
+ DOCLING_DO_OCR: Optional[bool] = None
+ DOCLING_FORCE_OCR: Optional[bool] = None
DOCLING_OCR_ENGINE: Optional[str] = None
DOCLING_OCR_LANG: Optional[str] = None
+ DOCLING_PDF_BACKEND: Optional[str] = None
+ DOCLING_TABLE_MODE: Optional[str] = None
+ DOCLING_PIPELINE: Optional[str] = None
DOCLING_DO_PICTURE_DESCRIPTION: Optional[bool] = None
DOCLING_PICTURE_DESCRIPTION_MODE: Optional[str] = None
DOCLING_PICTURE_DESCRIPTION_LOCAL: Optional[dict] = None
@@ -767,6 +777,16 @@ async def update_rag_config(
if form_data.DOCLING_SERVER_URL is not None
else request.app.state.config.DOCLING_SERVER_URL
)
+ request.app.state.config.DOCLING_DO_OCR = (
+ form_data.DOCLING_DO_OCR
+ if form_data.DOCLING_DO_OCR is not None
+ else request.app.state.config.DOCLING_DO_OCR
+ )
+ request.app.state.config.DOCLING_FORCE_OCR = (
+ form_data.DOCLING_FORCE_OCR
+ if form_data.DOCLING_FORCE_OCR is not None
+ else request.app.state.config.DOCLING_FORCE_OCR
+ )
request.app.state.config.DOCLING_OCR_ENGINE = (
form_data.DOCLING_OCR_ENGINE
if form_data.DOCLING_OCR_ENGINE is not None
@@ -777,7 +797,21 @@ async def update_rag_config(
if form_data.DOCLING_OCR_LANG is not None
else request.app.state.config.DOCLING_OCR_LANG
)
-
+ request.app.state.config.DOCLING_PDF_BACKEND = (
+ form_data.DOCLING_PDF_BACKEND
+ if form_data.DOCLING_PDF_BACKEND is not None
+ else request.app.state.config.DOCLING_PDF_BACKEND
+ )
+ request.app.state.config.DOCLING_TABLE_MODE = (
+ form_data.DOCLING_TABLE_MODE
+ if form_data.DOCLING_TABLE_MODE is not None
+ else request.app.state.config.DOCLING_TABLE_MODE
+ )
+ request.app.state.config.DOCLING_PIPELINE = (
+ form_data.DOCLING_PIPELINE
+ if form_data.DOCLING_PIPELINE is not None
+ else request.app.state.config.DOCLING_PIPELINE
+ )
request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION = (
form_data.DOCLING_DO_PICTURE_DESCRIPTION
if form_data.DOCLING_DO_PICTURE_DESCRIPTION is not None
@@ -1062,8 +1096,13 @@ async def update_rag_config(
"EXTERNAL_DOCUMENT_LOADER_API_KEY": request.app.state.config.EXTERNAL_DOCUMENT_LOADER_API_KEY,
"TIKA_SERVER_URL": request.app.state.config.TIKA_SERVER_URL,
"DOCLING_SERVER_URL": request.app.state.config.DOCLING_SERVER_URL,
+ "DOCLING_DO_OCR": request.app.state.config.DOCLING_DO_OCR,
+ "DOCLING_FORCE_OCR": request.app.state.config.DOCLING_FORCE_OCR,
"DOCLING_OCR_ENGINE": request.app.state.config.DOCLING_OCR_ENGINE,
"DOCLING_OCR_LANG": request.app.state.config.DOCLING_OCR_LANG,
+ "DOCLING_PDF_BACKEND": request.app.state.config.DOCLING_PDF_BACKEND,
+ "DOCLING_TABLE_MODE": request.app.state.config.DOCLING_TABLE_MODE,
+ "DOCLING_PIPELINE": request.app.state.config.DOCLING_PIPELINE,
"DOCLING_DO_PICTURE_DESCRIPTION": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION,
"DOCLING_PICTURE_DESCRIPTION_MODE": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE,
"DOCLING_PICTURE_DESCRIPTION_LOCAL": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL,
@@ -1453,8 +1492,13 @@ def process_file(
TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL,
DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL,
DOCLING_PARAMS={
+ "do_ocr": request.app.state.config.DOCLING_DO_OCR,
+ "force_ocr": request.app.state.config.DOCLING_FORCE_OCR,
"ocr_engine": request.app.state.config.DOCLING_OCR_ENGINE,
"ocr_lang": request.app.state.config.DOCLING_OCR_LANG,
+ "pdf_backend": request.app.state.config.DOCLING_PDF_BACKEND,
+ "table_mode": request.app.state.config.DOCLING_TABLE_MODE,
+ "pipeline": request.app.state.config.DOCLING_PIPELINE,
"do_picture_description": request.app.state.config.DOCLING_DO_PICTURE_DESCRIPTION,
"picture_description_mode": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_MODE,
"picture_description_local": request.app.state.config.DOCLING_PICTURE_DESCRIPTION_LOCAL,
@@ -1945,6 +1989,8 @@ async def process_web_search(
):
urls = []
+ result_items = []
+
try:
logging.info(
f"trying to web search with {request.app.state.config.WEB_SEARCH_ENGINE, form_data.queries}"
@@ -1966,6 +2012,7 @@ async def process_web_search(
if result:
for item in result:
if item and item.link:
+ result_items.append(item)
urls.append(item.link)
urls = list(dict.fromkeys(urls))
@@ -2010,12 +2057,16 @@ async def process_web_search(
urls = [
doc.metadata.get("source") for doc in docs if doc.metadata.get("source")
] # only keep the urls returned by the loader
+ result_items = [
+ dict(item) for item in result_items if item.link in urls
+ ] # only keep the search results that have been loaded
if request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL:
return {
"status": True,
"collection_name": None,
"filenames": urls,
+ "items": result_items,
"docs": [
{
"content": doc.page_content,
@@ -2048,6 +2099,7 @@ async def process_web_search(
return {
"status": True,
"collection_names": [collection_name],
+ "items": result_items,
"filenames": urls,
"loaded_count": len(docs),
}
@@ -2075,7 +2127,9 @@ def query_doc_handler(
user=Depends(get_verified_user),
):
try:
- if request.app.state.config.ENABLE_RAG_HYBRID_SEARCH:
+ if request.app.state.config.ENABLE_RAG_HYBRID_SEARCH and (
+ form_data.hybrid is None or form_data.hybrid
+ ):
collection_results = {}
collection_results[form_data.collection_name] = VECTOR_DB_CLIENT.get(
collection_name=form_data.collection_name
@@ -2145,7 +2199,9 @@ def query_collection_handler(
user=Depends(get_verified_user),
):
try:
- if request.app.state.config.ENABLE_RAG_HYBRID_SEARCH:
+ if request.app.state.config.ENABLE_RAG_HYBRID_SEARCH and (
+ form_data.hybrid is None or form_data.hybrid
+ ):
return query_collection_with_hybrid_search(
collection_names=form_data.collection_names,
queries=[form_data.query],
diff --git a/backend/open_webui/routers/tasks.py b/backend/open_webui/routers/tasks.py
index e49602094f..7585466f69 100644
--- a/backend/open_webui/routers/tasks.py
+++ b/backend/open_webui/routers/tasks.py
@@ -470,6 +470,10 @@ async def generate_queries(
detail=f"Query generation is disabled",
)
+ if getattr(request.state, "cached_queries", None):
+ log.info(f"Reusing cached queries: {request.state.cached_queries}")
+ return request.state.cached_queries
+
if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
models = {
request.state.model["id"]: request.state.model,
diff --git a/backend/open_webui/routers/tools.py b/backend/open_webui/routers/tools.py
index c017233765..5f82e7f1bd 100644
--- a/backend/open_webui/routers/tools.py
+++ b/backend/open_webui/routers/tools.py
@@ -4,6 +4,7 @@ from typing import Optional
import time
import re
import aiohttp
+from open_webui.models.groups import Groups
from pydantic import BaseModel, HttpUrl
from fastapi import APIRouter, Depends, HTTPException, Request, status
@@ -71,11 +72,12 @@ async def get_tools(request: Request, user=Depends(get_verified_user)):
# Admin can see all tools
return tools
else:
+ user_group_ids = {group.id for group in Groups.get_groups_by_member_id(user.id)}
tools = [
tool
for tool in tools
if tool.user_id == user.id
- or has_access(user.id, "read", tool.access_control)
+ or has_access(user.id, "read", tool.access_control, user_group_ids)
]
return tools
diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py
index 7b27b45b9d..5b331dce73 100644
--- a/backend/open_webui/routers/users.py
+++ b/backend/open_webui/routers/users.py
@@ -10,6 +10,8 @@ from pydantic import BaseModel
from open_webui.models.auths import Auths
+from open_webui.models.oauth_sessions import OAuthSessions
+
from open_webui.models.groups import Groups
from open_webui.models.chats import Chats
from open_webui.models.users import (
@@ -146,6 +148,10 @@ class ChatPermissions(BaseModel):
params: bool = True
file_upload: bool = True
delete: bool = True
+ delete_message: bool = True
+ continue_response: bool = True
+ regenerate_response: bool = True
+ rate_response: bool = True
edit: bool = True
share: bool = True
export: bool = True
@@ -336,6 +342,18 @@ async def get_user_by_id(user_id: str, user=Depends(get_verified_user)):
)
+@router.get("/{user_id}/oauth/sessions", response_model=Optional[dict])
+async def get_user_oauth_sessions_by_id(user_id: str, user=Depends(get_admin_user)):
+ sessions = OAuthSessions.get_sessions_by_user_id(user_id)
+ if sessions and len(sessions) > 0:
+ return sessions
+ else:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail=ERROR_MESSAGES.USER_NOT_FOUND,
+ )
+
+
############################
# GetUserProfileImageById
############################
diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py
index 5570348093..b64eab08ac 100644
--- a/backend/open_webui/socket/main.py
+++ b/backend/open_webui/socket/main.py
@@ -115,7 +115,7 @@ if WEBSOCKET_MANAGER == "redis":
clean_up_lock = RedisLock(
redis_url=WEBSOCKET_REDIS_URL,
- lock_name="usage_cleanup_lock",
+ lock_name=f"{REDIS_KEY_PREFIX}:usage_cleanup_lock",
timeout_secs=WEBSOCKET_REDIS_LOCK_TIMEOUT,
redis_sentinels=redis_sentinels,
redis_cluster=WEBSOCKET_REDIS_CLUSTER,
@@ -705,6 +705,42 @@ def get_event_emitter(request_info, update_db=True):
},
)
+ if "type" in event_data and event_data["type"] == "files":
+ message = Chats.get_message_by_id_and_message_id(
+ request_info["chat_id"],
+ request_info["message_id"],
+ )
+
+ files = event_data.get("data", {}).get("files", [])
+ files.extend(message.get("files", []))
+
+ Chats.upsert_message_to_chat_by_id_and_message_id(
+ request_info["chat_id"],
+ request_info["message_id"],
+ {
+ "files": files,
+ },
+ )
+
+ if event_data.get("type") in ["source", "citation"]:
+ data = event_data.get("data", {})
+ if data.get("type") == None:
+ message = Chats.get_message_by_id_and_message_id(
+ request_info["chat_id"],
+ request_info["message_id"],
+ )
+
+ sources = message.get("sources", [])
+ sources.append(data)
+
+ Chats.upsert_message_to_chat_by_id_and_message_id(
+ request_info["chat_id"],
+ request_info["message_id"],
+ {
+ "sources": sources,
+ },
+ )
+
return __event_emitter__
diff --git a/backend/open_webui/tasks.py b/backend/open_webui/tasks.py
index 714c532fca..a15e8ac146 100644
--- a/backend/open_webui/tasks.py
+++ b/backend/open_webui/tasks.py
@@ -153,9 +153,9 @@ async def stop_task(redis, task_id: str):
# Optionally check if task_id still in Redis a few moments later for feedback?
return {"status": True, "message": f"Stop signal sent for {task_id}"}
- task = tasks.pop(task_id)
+ task = tasks.pop(task_id, None)
if not task:
- raise ValueError(f"Task with ID {task_id} not found.")
+ return {"status": False, "message": f"Task with ID {task_id} not found."}
task.cancel() # Request task cancellation
try:
diff --git a/backend/open_webui/utils/access_control.py b/backend/open_webui/utils/access_control.py
index c36d861ad6..1529773c44 100644
--- a/backend/open_webui/utils/access_control.py
+++ b/backend/open_webui/utils/access_control.py
@@ -1,4 +1,4 @@
-from typing import Optional, Union, List, Dict, Any
+from typing import Optional, Set, Union, List, Dict, Any
from open_webui.models.users import Users, UserModel
from open_webui.models.groups import Groups
@@ -109,12 +109,15 @@ def has_access(
user_id: str,
type: str = "write",
access_control: Optional[dict] = None,
+ user_group_ids: Optional[Set[str]] = None,
) -> bool:
if access_control is None:
return type == "read"
- user_groups = Groups.get_groups_by_member_id(user_id)
- user_group_ids = [group.id for group in user_groups]
+ if user_group_ids is None:
+ user_groups = Groups.get_groups_by_member_id(user_id)
+ user_group_ids = {group.id for group in user_groups}
+
permission_access = access_control.get(type, {})
permitted_group_ids = permission_access.get("group_ids", [])
permitted_user_ids = permission_access.get("user_ids", [])
diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py
index 228dd3e30a..f941ef9263 100644
--- a/backend/open_webui/utils/auth.py
+++ b/backend/open_webui/utils/auth.py
@@ -261,55 +261,67 @@ def get_current_user(
return user
# auth by jwt token
- try:
- data = decode_token(token)
- except Exception as e:
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="Invalid token",
- )
- if data is not None and "id" in data:
- user = Users.get_user_by_id(data["id"])
- if user is None:
+ try:
+ try:
+ data = decode_token(token)
+ except Exception as e:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
- detail=ERROR_MESSAGES.INVALID_TOKEN,
+ detail="Invalid token",
)
- else:
- if WEBUI_AUTH_TRUSTED_EMAIL_HEADER:
- trusted_email = request.headers.get(
- WEBUI_AUTH_TRUSTED_EMAIL_HEADER, ""
- ).lower()
- if trusted_email and user.email != trusted_email:
- # Delete the token cookie
- response.delete_cookie("token")
- # Delete OAuth token if present
- if request.cookies.get("oauth_id_token"):
- response.delete_cookie("oauth_id_token")
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail="User mismatch. Please sign in again.",
+
+ if data is not None and "id" in data:
+ user = Users.get_user_by_id(data["id"])
+ if user is None:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail=ERROR_MESSAGES.INVALID_TOKEN,
+ )
+ else:
+ if WEBUI_AUTH_TRUSTED_EMAIL_HEADER:
+ trusted_email = request.headers.get(
+ WEBUI_AUTH_TRUSTED_EMAIL_HEADER, ""
+ ).lower()
+ if trusted_email and user.email != trusted_email:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail="User mismatch. Please sign in again.",
+ )
+
+ # Add user info to current span
+ current_span = trace.get_current_span()
+ if current_span:
+ current_span.set_attribute("client.user.id", user.id)
+ current_span.set_attribute("client.user.email", user.email)
+ current_span.set_attribute("client.user.role", user.role)
+ current_span.set_attribute("client.auth.type", "jwt")
+
+ # Refresh the user's last active timestamp asynchronously
+ # to prevent blocking the request
+ if background_tasks:
+ background_tasks.add_task(
+ Users.update_user_last_active_by_id, user.id
)
+ return user
+ else:
+ raise HTTPException(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ detail=ERROR_MESSAGES.UNAUTHORIZED,
+ )
+ except Exception as e:
+ # Delete the token cookie
+ if request.cookies.get("token"):
+ response.delete_cookie("token")
- # Add user info to current span
- current_span = trace.get_current_span()
- if current_span:
- current_span.set_attribute("client.user.id", user.id)
- current_span.set_attribute("client.user.email", user.email)
- current_span.set_attribute("client.user.role", user.role)
- current_span.set_attribute("client.auth.type", "jwt")
+ if request.cookies.get("oauth_id_token"):
+ response.delete_cookie("oauth_id_token")
- # Refresh the user's last active timestamp asynchronously
- # to prevent blocking the request
- if background_tasks:
- background_tasks.add_task(Users.update_user_last_active_by_id, user.id)
- return user
- else:
- raise HTTPException(
- status_code=status.HTTP_401_UNAUTHORIZED,
- detail=ERROR_MESSAGES.UNAUTHORIZED,
- )
+ # Delete OAuth session if present
+ if request.cookies.get("oauth_session_id"):
+ response.delete_cookie("oauth_session_id")
+
+ raise e
def get_current_user_by_api_key(api_key: str):
diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py
index b991c1b986..ae2c96c6da 100644
--- a/backend/open_webui/utils/middleware.py
+++ b/backend/open_webui/utils/middleware.py
@@ -98,8 +98,10 @@ from open_webui.env import (
SRC_LOG_LEVELS,
GLOBAL_LOG_LEVEL,
CHAT_RESPONSE_STREAM_DELTA_CHUNK_SIZE,
+ CHAT_RESPONSE_MAX_TOOL_CALL_RETRIES,
BYPASS_MODEL_ACCESS_CONTROL,
ENABLE_REALTIME_CHAT_SAVE,
+ ENABLE_QUERIES_CACHE,
)
from open_webui.constants import TASKS
@@ -109,6 +111,20 @@ log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
+DEFAULT_REASONING_TAGS = [
+ ("Tool Executed
\nExecuting...
\n