diff --git a/CHANGELOG.md b/CHANGELOG.md
index 48a60634c9..d827cff9c8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.103] - 2024-02-25
+
+### Added
+
+- **πŸ”— Built-in LiteLLM Proxy**: Open WebUI now bundles a LiteLLM proxy for connecting additional model providers.
+
+  - Easily integrate existing LiteLLM configurations using the `-v /path/to/config.yaml:/app/backend/data/litellm/config.yaml` flag.
+  - When running Open WebUI in a Docker container, ensure connections to localhost use `host.docker.internal`.
+
+- **πŸ–ΌοΈ Image Generation Enhancements**: Introducing Advanced Settings with an image preview feature.
+  - Customize image generation by setting the number of steps; defaults to the A1111 value.
+
+### Fixed
+
+- Resolved an issue where the RAG scan halted document loading upon encountering unsupported MIME types or exceptions (Issue #866).
+
+### Changed
+
+- Ollama is no longer required to run Open WebUI.
+- Access our comprehensive documentation at [Open WebUI Documentation](https://docs.openwebui.com/).
+
 ## [0.1.102] - 2024-02-22
 
 ### Added
diff --git a/README.md b/README.md
index bed73e09c0..7c40239c7d 100644
--- a/README.md
+++ b/README.md
@@ -103,14 +103,24 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open
 
 - After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! πŸ˜„
 
-#### Troubleshooting
+#### Open WebUI: Server Connection Error
 
-Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
+If you're experiencing connection issues, it's often because the WebUI Docker container cannot reach the Ollama server at 127.0.0.1:11434 (host.docker.internal:11434) from inside the container. Use the `--network=host` flag in your docker command to resolve this. Note that the port changes from 3000 to 8080, resulting in the link: `http://localhost:8080`.
+
+**Example Docker Command**:
+
+```bash
+docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_API_BASE_URL=http://127.0.0.1:11434/api --name open-webui --restart always ghcr.io/open-webui/open-webui:main
+```
 
 ### Other Installation Methods
 
 We offer various installation alternatives, including non-Docker methods, Docker Compose, Kustomize, and Helm. Visit our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/) or join our [Discord community](https://discord.gg/5rJgQTnV4s) for comprehensive guidance.
 
+### Troubleshooting
+
+Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s).
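For reference, the LiteLLM mount described in the changelog above can be combined with the documented Docker run command. This is only a sketch: the host path is a placeholder, and the port mapping, volume name, and image tag are assumed to match the project's documented defaults.

```bash
# Reuse an existing LiteLLM config by mounting it over the default
# /app/backend/data/litellm/config.yaml inside the container.
# /path/to/config.yaml is a placeholder for your own file.
docker run -d -p 3000:8080 \
  --add-host=host.docker.internal:host-gateway \
  -v /path/to/config.yaml:/app/backend/data/litellm/config.yaml \
  -v open-webui:/app/backend/data \
  --name open-webui --restart always \
  ghcr.io/open-webui/open-webui:main
```

The `--add-host` flag is only needed if the config points at services running directly on the Docker host; per the changelog note, such backends should be addressed as `host.docker.internal` rather than `localhost`.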
+ ### Keeping Your Docker Installation Up-to-Date In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/): diff --git a/backend/.dockerignore b/backend/.dockerignore index 11f9256fec..97ab32835d 100644 --- a/backend/.dockerignore +++ b/backend/.dockerignore @@ -4,4 +4,11 @@ _old uploads .ipynb_checkpoints *.db -_test \ No newline at end of file +_test +!/data +/data/* +!/data/litellm +/data/litellm/* +!data/litellm/config.yaml + +!data/config.json \ No newline at end of file diff --git a/backend/.gitignore b/backend/.gitignore index 16180123c8..ea83b34f43 100644 --- a/backend/.gitignore +++ b/backend/.gitignore @@ -6,6 +6,11 @@ uploads *.db _test Pipfile -data/* +!/data +/data/* +!/data/litellm +/data/litellm/* +!data/litellm/config.yaml + !data/config.json .webui_secret_key \ No newline at end of file diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py index 39d3f96aa0..dfa1f187a8 100644 --- a/backend/apps/images/main.py +++ b/backend/apps/images/main.py @@ -35,6 +35,7 @@ app.add_middleware( app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL app.state.ENABLED = app.state.AUTOMATIC1111_BASE_URL != "" app.state.IMAGE_SIZE = "512x512" +app.state.IMAGE_STEPS = 50 @app.get("/enabled", response_model=bool) @@ -49,7 +50,7 @@ async def toggle_enabled(request: Request, user=Depends(get_admin_user)): app.state.ENABLED = not app.state.ENABLED return app.state.ENABLED except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) class UrlUpdateForm(BaseModel): @@ -102,6 +103,32 @@ async def update_image_size( ) +class ImageStepsUpdateForm(BaseModel): + steps: int + + +@app.get("/steps") +async def get_image_size(user=Depends(get_admin_user)): + return {"IMAGE_STEPS": app.state.IMAGE_STEPS} + + +@app.post("/steps/update") +async def update_image_size( + form_data: ImageStepsUpdateForm, user=Depends(get_admin_user) +): + if form_data.steps >= 0: + app.state.IMAGE_STEPS = form_data.steps + return { + "IMAGE_STEPS": app.state.IMAGE_STEPS, + "status": True, + } + else: + raise HTTPException( + status_code=400, + detail=ERROR_MESSAGES.INCORRECT_FORMAT(" (e.g., 50)."), + ) + + @app.get("/models") def get_models(user=Depends(get_current_user)): try: @@ -109,7 +136,8 @@ def get_models(user=Depends(get_current_user)): models = r.json() return models except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + app.state.ENABLED = False + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) @app.get("/models/default") @@ -120,7 +148,8 @@ async def get_default_model(user=Depends(get_admin_user)): return {"model": options["sd_model_checkpoint"]} except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + app.state.ENABLED = False + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) class UpdateModelForm(BaseModel): @@ -177,6 +206,9 @@ def generate_image( "height": height, } + if app.state.IMAGE_STEPS != None: + data["steps"] = app.state.IMAGE_STEPS + if form_data.negative_prompt != None: data["negative_prompt"] = form_data.negative_prompt @@ -190,4 +222,4 @@ def generate_image( return r.json() except Exception as e: print(e) - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + raise HTTPException(status_code=400, 
detail=ERROR_MESSAGES.DEFAULT(e)) diff --git a/backend/config.py b/backend/config.py index fadae68ce0..effcd24620 100644 --- a/backend/config.py +++ b/backend/config.py @@ -6,6 +6,8 @@ from bs4 import BeautifulSoup from pathlib import Path import json +import yaml + import markdown import requests import shutil @@ -83,8 +85,6 @@ for version in soup.find_all("h2"): # Find the next sibling that is a h3 tag (section title) current = version.find_next_sibling() - print(current) - while current and current.name != "h2": if current.name == "h3": section_title = current.get_text().lower() # e.g., "added", "fixed" @@ -165,6 +165,40 @@ Path(CACHE_DIR).mkdir(parents=True, exist_ok=True) DOCS_DIR = f"{DATA_DIR}/docs" Path(DOCS_DIR).mkdir(parents=True, exist_ok=True) + +#################################### +# LITELLM_CONFIG +#################################### + + +def create_config_file(file_path): + directory = os.path.dirname(file_path) + + # Check if directory exists, if not, create it + if not os.path.exists(directory): + os.makedirs(directory) + + # Data to write into the YAML file + config_data = { + "general_settings": {}, + "litellm_settings": {}, + "model_list": [], + "router_settings": {}, + } + + # Write data to YAML file + with open(file_path, "w") as file: + yaml.dump(config_data, file) + + +LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml" + +if not os.path.exists(LITELLM_CONFIG_PATH): + print("Config file doesn't exist. Creating...") + create_config_file(LITELLM_CONFIG_PATH) + print("Config file created successfully.") + + #################################### # OLLAMA_API_BASE_URL #################################### diff --git a/backend/data/litellm/config.yaml b/backend/data/litellm/config.yaml new file mode 100644 index 0000000000..7d9d2b7230 --- /dev/null +++ b/backend/data/litellm/config.yaml @@ -0,0 +1,4 @@ +general_settings: {} +litellm_settings: {} +model_list: [] +router_settings: {} diff --git a/backend/main.py b/backend/main.py index 0be56752b2..a432e9ed65 100644 --- a/backend/main.py +++ b/backend/main.py @@ -2,25 +2,31 @@ from bs4 import BeautifulSoup import json import markdown import time +import os +import sys - -from fastapi import FastAPI, Request +from fastapi import FastAPI, Request, Depends from fastapi.staticfiles import StaticFiles from fastapi import HTTPException +from fastapi.responses import JSONResponse from fastapi.middleware.wsgi import WSGIMiddleware from fastapi.middleware.cors import CORSMiddleware from starlette.exceptions import HTTPException as StarletteHTTPException +from litellm.proxy.proxy_server import ProxyConfig, initialize +from litellm.proxy.proxy_server import app as litellm_app + from apps.ollama.main import app as ollama_app from apps.openai.main import app as openai_app from apps.audio.main import app as audio_app from apps.images.main import app as images_app from apps.rag.main import app as rag_app - from apps.web.main import app as webui_app + from config import WEBUI_NAME, ENV, VERSION, CHANGELOG, FRONTEND_BUILD_DIR +from utils.utils import get_http_authorization_cred, get_current_user class SPAStaticFiles(StaticFiles): @@ -34,6 +40,21 @@ class SPAStaticFiles(StaticFiles): raise ex +proxy_config = ProxyConfig() + + +async def config(): + router, model_list, general_settings = await proxy_config.load_config( + router=None, config_file_path="./data/litellm/config.yaml" + ) + + await initialize(config="./data/litellm/config.yaml", telemetry=False) + + +async def startup(): + await config() + + app = FastAPI(docs_url="/docs" if ENV 
== "dev" else None, redoc_url=None) origins = ["*"] @@ -47,6 +68,11 @@ app.add_middleware( ) +@app.on_event("startup") +async def on_startup(): + await startup() + + @app.middleware("http") async def check_url(request: Request, call_next): start_time = int(time.time()) @@ -57,7 +83,23 @@ async def check_url(request: Request, call_next): return response +@litellm_app.middleware("http") +async def auth_middleware(request: Request, call_next): + auth_header = request.headers.get("Authorization", "") + + if ENV != "dev": + try: + user = get_current_user(get_http_authorization_cred(auth_header)) + print(user) + except Exception as e: + return JSONResponse(status_code=400, content={"detail": str(e)}) + + response = await call_next(request) + return response + + app.mount("/api/v1", webui_app) +app.mount("/litellm/api", litellm_app) app.mount("/ollama/api", ollama_app) app.mount("/openai/api", openai_app) diff --git a/backend/requirements.txt b/backend/requirements.txt index 56e1d36eb7..0cacacd800 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -16,6 +16,10 @@ aiohttp peewee bcrypt +litellm +apscheduler +google-generativeai + langchain langchain-community chromadb diff --git a/backend/utils/utils.py b/backend/utils/utils.py index c6d018145a..32724af398 100644 --- a/backend/utils/utils.py +++ b/backend/utils/utils.py @@ -58,6 +58,14 @@ def extract_token_from_auth_header(auth_header: str): return auth_header[len("Bearer ") :] +def get_http_authorization_cred(auth_header: str): + try: + scheme, credentials = auth_header.split(" ") + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + except: + raise ValueError(ERROR_MESSAGES.INVALID_TOKEN) + + def get_current_user( auth_token: HTTPAuthorizationCredentials = Depends(bearer_security), ): diff --git a/package.json b/package.json index 30549fdd59..7938558db5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.1.102", + "version": "0.1.103", "private": true, "scripts": { "dev": "vite dev --host", diff --git a/src/app.html b/src/app.html index 9b1099b0b2..6216e56fab 100644 --- a/src/app.html +++ b/src/app.html @@ -5,6 +5,7 @@ + @@ -45,7 +45,7 @@ {#if model.name === 'hr'}
{:else} - diff --git a/src/lib/components/chat/Settings/About.svelte b/src/lib/components/chat/Settings/About.svelte index 6933ed3b6f..60c0d7168b 100644 --- a/src/lib/components/chat/Settings/About.svelte +++ b/src/lib/components/chat/Settings/About.svelte @@ -38,16 +38,18 @@ -
+ {#if ollamaVersion} +
-
-
Ollama Version
-
-
- {ollamaVersion ?? 'N/A'} +
+
Ollama Version
+
+
+ {ollamaVersion ?? 'N/A'} +
-
+ {/if}
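The backend portion of this diff mounts the LiteLLM proxy at `/litellm/api` and wraps it in an auth middleware that requires a WebUI token whenever `ENV` is not `dev`. A quick smoke test of that mount — a sketch, assuming LiteLLM's usual `/v1/models` listing route and a local instance on the default port:

```bash
# Placeholder token; any valid Open WebUI token should work here.
TOKEN="<your Open WebUI token>"

# Outside dev mode, a missing or invalid Authorization header is rejected by the
# new middleware; with a valid token, LiteLLM answers with the models defined in
# data/litellm/config.yaml.
curl -s http://localhost:8080/litellm/api/v1/models \
  -H "Authorization: Bearer $TOKEN"
```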
diff --git a/src/lib/components/chat/Settings/Connections.svelte b/src/lib/components/chat/Settings/Connections.svelte index 462d300556..f4c0e04df0 100644 --- a/src/lib/components/chat/Settings/Connections.svelte +++ b/src/lib/components/chat/Settings/Connections.svelte @@ -3,7 +3,7 @@ import { createEventDispatcher, onMount } from 'svelte'; const dispatch = createEventDispatcher(); - import { getOllamaAPIUrl, updateOllamaAPIUrl } from '$lib/apis/ollama'; + import { getOllamaAPIUrl, getOllamaVersion, updateOllamaAPIUrl } from '$lib/apis/ollama'; import { getOpenAIKey, getOpenAIUrl, updateOpenAIKey, updateOpenAIUrl } from '$lib/apis/openai'; import toast from 'svelte-french-toast'; @@ -15,6 +15,9 @@ let OPENAI_API_KEY = ''; let OPENAI_API_BASE_URL = ''; + let showOpenAI = false; + let showLiteLLM = false; + const updateOpenAIHandler = async () => { OPENAI_API_BASE_URL = await updateOpenAIUrl(localStorage.token, OPENAI_API_BASE_URL); OPENAI_API_KEY = await updateOpenAIKey(localStorage.token, OPENAI_API_KEY); @@ -24,11 +27,15 @@ const updateOllamaAPIUrlHandler = async () => { API_BASE_URL = await updateOllamaAPIUrl(localStorage.token, API_BASE_URL); - const _models = await getModels('ollama'); - if (_models.length > 0) { + const ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => { + toast.error(error); + return null; + }); + + if (ollamaVersion) { toast.success('Server connection verified'); - await models.set(_models); + await models.set(await getModels()); } }; @@ -42,7 +49,7 @@
{ updateOpenAIHandler(); dispatch('save'); @@ -53,81 +60,100 @@ // }); }} > -
-
Ollama API URL
-
-
- +
+
+
+
+
OpenAI API
+ +
+ + {#if showOpenAI} +
+
API Key
+
+
+ +
+
+
+ +
+
API Base URL
+
+
+ +
+
+
+ WebUI will make requests to '{OPENAI_API_BASE_URL}/chat' +
+
+ {/if}
- -
- -
- Trouble accessing Ollama? - - Click here for help. - -
-
- -
- -
-
-
OpenAI API Key
-
-
- -
+ + + +
-
-
-
OpenAI API Base URL
-
-
- -
-
- WebUI will make requests to '{OPENAI_API_BASE_URL}/chat' + Trouble accessing Ollama? + + Click here for help. +
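The revised connection handler above verifies the Ollama URL by fetching the server version instead of listing models, so the check succeeds even before any models are pulled. Roughly the same check from the command line — a sketch, assuming the Ollama proxy keeps its `/ollama/api` mount and forwards a `version` route:

```bash
# Placeholder token for the Open WebUI backend.
TOKEN="<your Open WebUI token>"

# A JSON response containing a version string means the backend can reach Ollama;
# an error here mirrors the toast shown by updateOllamaAPIUrlHandler.
curl -s http://localhost:8080/ollama/api/version \
  -H "Authorization: Bearer $TOKEN"
```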
diff --git a/src/lib/components/chat/Settings/General.svelte b/src/lib/components/chat/Settings/General.svelte index 10ee6c7295..43826f1aac 100644 --- a/src/lib/components/chat/Settings/General.svelte +++ b/src/lib/components/chat/Settings/General.svelte @@ -84,7 +84,7 @@
-
+
WebUI Settings
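The Images settings below drive the new `IMAGE_STEPS` endpoints added in `backend/apps/images/main.py`. They can also be exercised directly — a sketch, assuming the images router keeps its existing `/images/api` mount and an admin token:

```bash
# Placeholder admin token; the steps endpoints require an admin user.
TOKEN="<admin token>"

# Read the current step count; returns {"IMAGE_STEPS": 50} by default.
curl -s http://localhost:8080/images/api/steps \
  -H "Authorization: Bearer $TOKEN"

# Update it; negative values are rejected with a 400 by the new validation.
curl -s http://localhost:8080/images/api/steps/update \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"steps": 30}'
```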
diff --git a/src/lib/components/chat/Settings/Images.svelte b/src/lib/components/chat/Settings/Images.svelte index 94cb3213b8..cf64b7d4c0 100644 --- a/src/lib/components/chat/Settings/Images.svelte +++ b/src/lib/components/chat/Settings/Images.svelte @@ -12,7 +12,9 @@ toggleImageGenerationEnabledStatus, updateAUTOMATIC1111Url, updateDefaultDiffusionModel, - updateImageSize + updateImageSize, + getImageSteps, + updateImageSteps } from '$lib/apis/images'; import { getBackendConfig } from '$lib/apis'; const dispatch = createEventDispatcher(); @@ -21,20 +23,23 @@ let loading = false; - let enableImageGeneration = true; + let enableImageGeneration = false; let AUTOMATIC1111_BASE_URL = ''; let selectedModel = ''; - let models = []; + let models = null; let imageSize = ''; + let steps = 50; const getModels = async () => { models = await getDiffusionModels(localStorage.token).catch((error) => { toast.error(error); return null; }); - selectedModel = await getDefaultDiffusionModel(localStorage.token); + selectedModel = await getDefaultDiffusionModel(localStorage.token).catch((error) => { + return ''; + }); }; const updateAUTOMATIC1111UrlHandler = async () => { @@ -83,6 +88,7 @@ if (enableImageGeneration && AUTOMATIC1111_BASE_URL) { imageSize = await getImageSize(localStorage.token); + steps = await getImageSteps(localStorage.token); getModels(); } } @@ -98,12 +104,16 @@ toast.error(error); return null; }); + await updateImageSteps(localStorage.token, steps).catch((error) => { + toast.error(error); + return null; + }); dispatch('save'); loading = false; }} > -
+
Image Settings
@@ -188,7 +198,7 @@ {#if !selectedModel} {/if} - {#each models as model} + {#each models ?? [] as model} @@ -210,6 +220,19 @@
+ +
+
Set Steps
+
+
+ +
+
+
{/if}
diff --git a/src/lib/components/chat/Settings/Models.svelte b/src/lib/components/chat/Settings/Models.svelte index c7ca790814..455927dfc8 100644 --- a/src/lib/components/chat/Settings/Models.svelte +++ b/src/lib/components/chat/Settings/Models.svelte @@ -2,14 +2,33 @@ import queue from 'async/queue'; import toast from 'svelte-french-toast'; - import { createModel, deleteModel, pullModel } from '$lib/apis/ollama'; + import { createModel, deleteModel, getOllamaVersion, pullModel } from '$lib/apis/ollama'; import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants'; import { WEBUI_NAME, models, user } from '$lib/stores'; import { splitStream } from '$lib/utils'; + import { onMount } from 'svelte'; + import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm'; export let getModels: Function; + let showLiteLLM = false; + let showLiteLLMParams = false; + + let liteLLMModelInfo = []; + + let liteLLMModel = ''; + let liteLLMModelName = ''; + let liteLLMAPIBase = ''; + let liteLLMAPIKey = ''; + let liteLLMRPM = ''; + + let deleteLiteLLMModelId = ''; + + $: liteLLMModelName = liteLLMModel; + // Models + let showExperimentalOllama = false; + let ollamaVersion = ''; const MAX_PARALLEL_DOWNLOADS = 3; const modelDownloadQueue = queue( (task: { modelName: string }, cb) => @@ -286,256 +305,184 @@ opts.callback({ success: true, modelName: opts.modelName }); } }; + + const addLiteLLMModelHandler = async () => { + if (!liteLLMModelInfo.find((info) => info.model_name === liteLLMModelName)) { + const res = await addLiteLLMModel(localStorage.token, { + name: liteLLMModelName, + model: liteLLMModel, + api_base: liteLLMAPIBase, + api_key: liteLLMAPIKey, + rpm: liteLLMRPM + }).catch((error) => { + toast.error(error); + return null; + }); + + if (res) { + if (res.message) { + toast.success(res.message); + } + } + } else { + toast.error(`Model ${liteLLMModelName} already exists.`); + } + + liteLLMModelName = ''; + liteLLMModel = ''; + liteLLMAPIBase = ''; + liteLLMAPIKey = ''; + liteLLMRPM = ''; + + liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token); + models.set(await getModels()); + }; + + const deleteLiteLLMModelHandler = async () => { + const res = await deleteLiteLLMModel(localStorage.token, deleteLiteLLMModelId).catch( + (error) => { + toast.error(error); + return null; + } + ); + + if (res) { + if (res.message) { + toast.success(res.message); + } + } + + deleteLiteLLMModelId = ''; + liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token); + models.set(await getModels()); + }; + + onMount(async () => { + ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false); + liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token); + });
-
-
-
Pull a model from Ollama.com
-
-
- -
- -
- -
- To access the available model names for downloading, click here. -
- - {#if Object.keys(modelDownloadStatus).length > 0} - {#each Object.keys(modelDownloadStatus) as model} -
-
{model}
-
-
- {modelDownloadStatus[model].pullProgress ?? 0}% -
-
- {modelDownloadStatus[model].digest} -
-
-
- {/each} - {/if} -
- -
- -
-
Delete a model
-
-
- -
- -
-
- -
- - { - uploadModelHandler(); - }} - > -
-
- Upload a GGUF model (Experimental) -
- - -
- -
-
- {#if modelUploadMode === 'file'} -
- { - console.log(modelInputFile); - }} - accept=".gguf" - required - hidden - /> - - -
- {:else} -
- -
- {/if} -
- - {#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')} - +
+ +
+ To access the available model names for downloading, click here. +
+ + {#if Object.keys(modelDownloadStatus).length > 0} + {#each Object.keys(modelDownloadStatus) as model} +
+
{model}
+
+
+ {modelDownloadStatus[model].pullProgress ?? 0}% +
+
+ {modelDownloadStatus[model].digest} +
+
- {:else} + {/each} + {/if} +
+ +
+
Delete a model
+
+
+ +
+ +
+
+ +
+
+
Experimental
+ +
+
+ + {#if showExperimentalOllama} + { + uploadModelHandler(); + }} + > +
+
Upload a GGUF model
+ + +
+ +
+
+ {#if modelUploadMode === 'file'} +
+ { + console.log(modelInputFile); + }} + accept=".gguf" + required + hidden + /> + + +
+ {:else} +
+ +
+ {/if} +
+ + {#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')} + + {/if} +
+ + {#if (modelUploadMode === 'file' && modelInputFile && modelInputFile.length > 0) || (modelUploadMode === 'url' && modelFileUrl !== '')} +
+
+
Modelfile Content
+