Mirror of https://github.com/open-webui/open-webui.git, synced 2025-12-13 21:05:19 +00:00
Merge pull request #17158 from sihyeonn/fix/sh-cache

perf: fix cache key generation for model list caching

Commit 048f30aa97: 2 changed files with 2 additions and 2 deletions
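
The change is the same in both files: the `cached` decorator on each router's `get_all_models` gains an explicit `key` callable, so the model list is cached under a stable per-user key (`ollama_all_models_{user.id}` / `openai_all_models_{user.id}`, with an unscoped fallback when no user is given) instead of the decorator's default key.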
@@ -340,7 +340,7 @@ def merge_ollama_models_lists(model_lists):
     return list(merged_models.values())


-@cached(ttl=MODELS_CACHE_TTL)
+@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"ollama_all_models_{user.id}" if user else "ollama_all_models")
 async def get_all_models(request: Request, user: UserModel = None):
     log.info("get_all_models()")
     if request.app.state.config.ENABLE_OLLAMA_API:
@@ -401,7 +401,7 @@ async def get_filtered_models(models, user):
     return filtered_models


-@cached(ttl=MODELS_CACHE_TTL)
+@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"openai_all_models_{user.id}" if user else "openai_all_models")
 async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
     log.info("get_all_models()")

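
Why the explicit key matters: with no `key` argument, a caching decorator typically derives the key from the call arguments, and `get_all_models` receives a fresh FastAPI `Request` object on every call, so an argument-derived key never repeats and every request misses the cache and re-fetches the model lists. Below is a minimal sketch of this decorator pattern, not open-webui's actual `cached` helper; the names `cached` and `store` and the 60-second `MODELS_CACHE_TTL` value are illustrative assumptions.

import functools
import time

MODELS_CACHE_TTL = 60  # assumed TTL in seconds; the real value is configured elsewhere

def cached(ttl, key=None):
    """TTL cache for async functions, keyed by an optional `key` callable."""
    def decorator(fn):
        store = {}  # cache_key -> (expires_at, value)

        @functools.wraps(fn)
        async def wrapper(request, user=None):
            # Without an explicit `key`, fall back to an argument-derived key.
            # repr() of a per-call Request object embeds its id(), so the
            # fallback key is unique on every call and the cache never hits.
            cache_key = key(request, user) if key else repr((request, user))
            hit = store.get(cache_key)
            if hit is not None and hit[0] > time.monotonic():
                return hit[1]  # entry still fresh: skip the upstream fetch
            value = await fn(request, user)
            store[cache_key] = (time.monotonic() + ttl, value)
            return value

        return wrapper
    return decorator

# Keyed as in the PR: one cache entry per user, one shared entry otherwise.
@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"demo_all_models_{user.id}" if user else "demo_all_models")
async def get_all_models(request, user=None):
    ...  # fetch and merge the upstream model lists

With the explicit key, repeated calls inside the TTL window for the same user are served from `store` instead of re-querying every configured endpoint, which is presumably the performance issue the commit title refers to.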