diff --git a/CHANGELOG.md b/CHANGELOG.md index 48a60634c9..d827cff9c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.1.103] - 2024-02-25 + +### Added + +- **π Built-in LiteLLM Proxy**: Now includes LiteLLM proxy within Open WebUI for enhanced functionality. + + - Easily integrate existing LiteLLM configurations using `-v /path/to/config.yaml:/app/backend/data/litellm/config.yaml` flag. + - When utilizing Docker container to run Open WebUI, ensure connections to localhost use `host.docker.internal`. + +- **πΌοΈ Image Generation Enhancements**: Introducing Advanced Settings with Image Preview Feature. + - Customize image generation by setting the number of steps; defaults to A1111 value. + +### Fixed + +- Resolved issue with RAG scan halting document loading upon encountering unsupported MIME types or exceptions (Issue #866). + +### Changed + +- Ollama is no longer required to run Open WebUI. +- Access our comprehensive documentation at [Open WebUI Documentation](https://docs.openwebui.com/). + ## [0.1.102] - 2024-02-22 ### Added diff --git a/README.md b/README.md index bed73e09c0..7c40239c7d 100644 --- a/README.md +++ b/README.md @@ -103,14 +103,24 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open - After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! π -#### Troubleshooting +#### Open WebUI: Server Connection Error -Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s). 
+If you're experiencing connection issues, it's often due to the WebUI docker container not being able to reach the Ollama server at 127.0.0.1:11434 (host.docker.internal:11434) inside the container. Use the `--network=host` flag in your docker command to resolve this. Note that the port changes from 3000 to 8080, resulting in the link: `http://localhost:8080`. + +**Example Docker Command**: + +```bash +docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_API_BASE_URL=http://127.0.0.1:11434/api --name open-webui --restart always ghcr.io/open-webui/open-webui:main +``` ### Other Installation Methods We offer various installation alternatives, including non-Docker methods, Docker Compose, Kustomize, and Helm. Visit our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/) or join our [Discord community](https://discord.gg/5rJgQTnV4s) for comprehensive guidance. +### Troubleshooting + +Encountering connection issues? Our [Open WebUI Documentation](https://docs.openwebui.com/getting-started/troubleshooting/) has got you covered. For further assistance and to join our vibrant community, visit the [Open WebUI Discord](https://discord.gg/5rJgQTnV4s). 
+ ### Keeping Your Docker Installation Up-to-Date In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/): diff --git a/backend/.dockerignore b/backend/.dockerignore index 11f9256fec..97ab32835d 100644 --- a/backend/.dockerignore +++ b/backend/.dockerignore @@ -4,4 +4,11 @@ _old uploads .ipynb_checkpoints *.db -_test \ No newline at end of file +_test +!/data +/data/* +!/data/litellm +/data/litellm/* +!data/litellm/config.yaml + +!data/config.json \ No newline at end of file diff --git a/backend/.gitignore b/backend/.gitignore index 16180123c8..ea83b34f43 100644 --- a/backend/.gitignore +++ b/backend/.gitignore @@ -6,6 +6,11 @@ uploads *.db _test Pipfile -data/* +!/data +/data/* +!/data/litellm +/data/litellm/* +!data/litellm/config.yaml + !data/config.json .webui_secret_key \ No newline at end of file diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py index 39d3f96aa0..dfa1f187a8 100644 --- a/backend/apps/images/main.py +++ b/backend/apps/images/main.py @@ -35,6 +35,7 @@ app.add_middleware( app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL app.state.ENABLED = app.state.AUTOMATIC1111_BASE_URL != "" app.state.IMAGE_SIZE = "512x512" +app.state.IMAGE_STEPS = 50 @app.get("/enabled", response_model=bool) @@ -49,7 +50,7 @@ async def toggle_enabled(request: Request, user=Depends(get_admin_user)): app.state.ENABLED = not app.state.ENABLED return app.state.ENABLED except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) class UrlUpdateForm(BaseModel): @@ -102,6 +103,32 @@ async def update_image_size( ) +class ImageStepsUpdateForm(BaseModel): + steps: int + + +@app.get("/steps") +async def get_image_size(user=Depends(get_admin_user)): + return {"IMAGE_STEPS": app.state.IMAGE_STEPS} + + +@app.post("/steps/update") +async def 
update_image_size( + form_data: ImageStepsUpdateForm, user=Depends(get_admin_user) +): + if form_data.steps >= 0: + app.state.IMAGE_STEPS = form_data.steps + return { + "IMAGE_STEPS": app.state.IMAGE_STEPS, + "status": True, + } + else: + raise HTTPException( + status_code=400, + detail=ERROR_MESSAGES.INCORRECT_FORMAT(" (e.g., 50)."), + ) + + @app.get("/models") def get_models(user=Depends(get_current_user)): try: @@ -109,7 +136,8 @@ def get_models(user=Depends(get_current_user)): models = r.json() return models except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + app.state.ENABLED = False + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) @app.get("/models/default") @@ -120,7 +148,8 @@ async def get_default_model(user=Depends(get_admin_user)): return {"model": options["sd_model_checkpoint"]} except Exception as e: - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + app.state.ENABLED = False + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) class UpdateModelForm(BaseModel): @@ -177,6 +206,9 @@ def generate_image( "height": height, } + if app.state.IMAGE_STEPS != None: + data["steps"] = app.state.IMAGE_STEPS + if form_data.negative_prompt != None: data["negative_prompt"] = form_data.negative_prompt @@ -190,4 +222,4 @@ def generate_image( return r.json() except Exception as e: print(e) - raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e)) + raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e)) diff --git a/backend/config.py b/backend/config.py index fadae68ce0..effcd24620 100644 --- a/backend/config.py +++ b/backend/config.py @@ -6,6 +6,8 @@ from bs4 import BeautifulSoup from pathlib import Path import json +import yaml + import markdown import requests import shutil @@ -83,8 +85,6 @@ for version in soup.find_all("h2"): # Find the next sibling that is a h3 tag (section title) current = 
version.find_next_sibling() - print(current) - while current and current.name != "h2": if current.name == "h3": section_title = current.get_text().lower() # e.g., "added", "fixed" @@ -165,6 +165,40 @@ Path(CACHE_DIR).mkdir(parents=True, exist_ok=True) DOCS_DIR = f"{DATA_DIR}/docs" Path(DOCS_DIR).mkdir(parents=True, exist_ok=True) + +#################################### +# LITELLM_CONFIG +#################################### + + +def create_config_file(file_path): + directory = os.path.dirname(file_path) + + # Check if directory exists, if not, create it + if not os.path.exists(directory): + os.makedirs(directory) + + # Data to write into the YAML file + config_data = { + "general_settings": {}, + "litellm_settings": {}, + "model_list": [], + "router_settings": {}, + } + + # Write data to YAML file + with open(file_path, "w") as file: + yaml.dump(config_data, file) + + +LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml" + +if not os.path.exists(LITELLM_CONFIG_PATH): + print("Config file doesn't exist. 
Creating...") + create_config_file(LITELLM_CONFIG_PATH) + print("Config file created successfully.") + + #################################### # OLLAMA_API_BASE_URL #################################### diff --git a/backend/data/litellm/config.yaml b/backend/data/litellm/config.yaml new file mode 100644 index 0000000000..7d9d2b7230 --- /dev/null +++ b/backend/data/litellm/config.yaml @@ -0,0 +1,4 @@ +general_settings: {} +litellm_settings: {} +model_list: [] +router_settings: {} diff --git a/backend/main.py b/backend/main.py index 0be56752b2..a432e9ed65 100644 --- a/backend/main.py +++ b/backend/main.py @@ -2,25 +2,31 @@ from bs4 import BeautifulSoup import json import markdown import time +import os +import sys - -from fastapi import FastAPI, Request +from fastapi import FastAPI, Request, Depends from fastapi.staticfiles import StaticFiles from fastapi import HTTPException +from fastapi.responses import JSONResponse from fastapi.middleware.wsgi import WSGIMiddleware from fastapi.middleware.cors import CORSMiddleware from starlette.exceptions import HTTPException as StarletteHTTPException +from litellm.proxy.proxy_server import ProxyConfig, initialize +from litellm.proxy.proxy_server import app as litellm_app + from apps.ollama.main import app as ollama_app from apps.openai.main import app as openai_app from apps.audio.main import app as audio_app from apps.images.main import app as images_app from apps.rag.main import app as rag_app - from apps.web.main import app as webui_app + from config import WEBUI_NAME, ENV, VERSION, CHANGELOG, FRONTEND_BUILD_DIR +from utils.utils import get_http_authorization_cred, get_current_user class SPAStaticFiles(StaticFiles): @@ -34,6 +40,21 @@ class SPAStaticFiles(StaticFiles): raise ex +proxy_config = ProxyConfig() + + +async def config(): + router, model_list, general_settings = await proxy_config.load_config( + router=None, config_file_path="./data/litellm/config.yaml" + ) + + await initialize(config="./data/litellm/config.yaml", 
telemetry=False) + + +async def startup(): + await config() + + app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None) origins = ["*"] @@ -47,6 +68,11 @@ app.add_middleware( ) +@app.on_event("startup") +async def on_startup(): + await startup() + + @app.middleware("http") async def check_url(request: Request, call_next): start_time = int(time.time()) @@ -57,7 +83,23 @@ async def check_url(request: Request, call_next): return response +@litellm_app.middleware("http") +async def auth_middleware(request: Request, call_next): + auth_header = request.headers.get("Authorization", "") + + if ENV != "dev": + try: + user = get_current_user(get_http_authorization_cred(auth_header)) + print(user) + except Exception as e: + return JSONResponse(status_code=400, content={"detail": str(e)}) + + response = await call_next(request) + return response + + app.mount("/api/v1", webui_app) +app.mount("/litellm/api", litellm_app) app.mount("/ollama/api", ollama_app) app.mount("/openai/api", openai_app) diff --git a/backend/requirements.txt b/backend/requirements.txt index 56e1d36eb7..0cacacd800 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -16,6 +16,10 @@ aiohttp peewee bcrypt +litellm +apscheduler +google-generativeai + langchain langchain-community chromadb diff --git a/backend/utils/utils.py b/backend/utils/utils.py index c6d018145a..32724af398 100644 --- a/backend/utils/utils.py +++ b/backend/utils/utils.py @@ -58,6 +58,14 @@ def extract_token_from_auth_header(auth_header: str): return auth_header[len("Bearer ") :] +def get_http_authorization_cred(auth_header: str): + try: + scheme, credentials = auth_header.split(" ") + return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) + except: + raise ValueError(ERROR_MESSAGES.INVALID_TOKEN) + + def get_current_user( auth_token: HTTPAuthorizationCredentials = Depends(bearer_security), ): diff --git a/package.json b/package.json index 30549fdd59..7938558db5 100644 --- 
a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.1.102", + "version": "0.1.103", "private": true, "scripts": { "dev": "vite dev --host", diff --git a/src/app.html b/src/app.html index 9b1099b0b2..6216e56fab 100644 --- a/src/app.html +++ b/src/app.html @@ -5,6 +5,7 @@ + @@ -45,7 +45,7 @@ {#if model.name === 'hr'}