diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py
index 1a6b75c555..4c5cdce8ca 100644
--- a/backend/open_webui/routers/ollama.py
+++ b/backend/open_webui/routers/ollama.py
@@ -340,7 +340,7 @@ def merge_ollama_models_lists(model_lists):
     return list(merged_models.values())
 
 
-@cached(ttl=MODELS_CACHE_TTL)
+@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"ollama_all_models_{user.id}" if user else "ollama_all_models")
 async def get_all_models(request: Request, user: UserModel = None):
     log.info("get_all_models()")
     if request.app.state.config.ENABLE_OLLAMA_API:
diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 7ba0c5f68a..a94791bdf5 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -401,7 +401,7 @@ async def get_filtered_models(models, user):
     return filtered_models
 
 
-@cached(ttl=MODELS_CACHE_TTL)
+@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"openai_all_models_{user.id}" if user else "openai_all_models")
 async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
     log.info("get_all_models()")
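
For reference, the sketch below shows the per-user cache-key idea from the diff in isolation: the TTL cache keys on `user.id`, presumably so that one user's cached model list is never handed to another caller while the entry is still warm. It assumes the `cached` decorator is aiocache's (where a per-call key function is normally passed as `key_builder`; if the project wraps `cached` so that `key=` accepts a callable, the idea is the same). `DummyUser`, the request placeholder, and the function body are illustrative stand-ins, not Open WebUI code.

```python
# Minimal sketch of per-user TTL caching with aiocache (assumed backend).
import asyncio
from dataclasses import dataclass

from aiocache import cached


@dataclass
class DummyUser:
    id: str  # illustrative stand-in for UserModel


def _models_key(func, request, user=None):
    # One cache entry per user, so user A's filtered model list is never
    # served to user B; unauthenticated calls share a single entry.
    return f"all_models_{user.id}" if user else "all_models"


@cached(ttl=60, key_builder=_models_key)
async def get_all_models(request, user: DummyUser = None):
    # Stand-in body for the real aggregation over the configured endpoints.
    print(f"cache miss, fetching for {user.id if user else 'anonymous'}")
    return {"data": [f"models-for-{user.id if user else 'everyone'}"]}


async def main():
    await get_all_models("req", DummyUser("alice"))  # miss
    await get_all_models("req", DummyUser("alice"))  # hit (same key)
    await get_all_models("req", DummyUser("bob"))    # miss (different key)


if __name__ == "__main__":
    asyncio.run(main())
```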