mirror of https://github.com/open-webui/open-webui.git
synced 2025-12-15 05:45:19 +00:00

Formatting

commit c821c3ecb0 (parent b3de3295d6)
2 changed files with 16 additions and 6 deletions
First changed file:

@@ -924,7 +924,10 @@ try:
         app.state.config.RAG_EMBEDDING_MODEL,
         RAG_EMBEDDING_MODEL_AUTO_UPDATE,
     )
-    if app.state.config.ENABLE_RAG_HYBRID_SEARCH and not app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:
+    if (
+        app.state.config.ENABLE_RAG_HYBRID_SEARCH
+        and not app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL
+    ):
         app.state.rf = get_rf(
             app.state.config.RAG_RERANKING_ENGINE,
             app.state.config.RAG_RERANKING_MODEL,
Second changed file:

@@ -280,14 +280,16 @@ async def update_embedding_config(
     log.info(
         f"Updating embedding model: {request.app.state.config.RAG_EMBEDDING_MODEL} to {form_data.embedding_model}"
     )
-    if request.app.state.config.RAG_EMBEDDING_ENGINE == '':
+    if request.app.state.config.RAG_EMBEDDING_ENGINE == "":
         # unloads current internal embedding model and clears VRAM cache
         request.app.state.ef = None
         request.app.state.EMBEDDING_FUNCTION = None
         import gc
+
         gc.collect()
-        if DEVICE_TYPE == 'cuda':
+        if DEVICE_TYPE == "cuda":
             import torch
+
             if torch.cuda.is_available():
                 torch.cuda.empty_cache()
     try:
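The unload-and-reclaim sequence in this hunk (and in the reranker hunk below, which repeats it) is worth spelling out: dropping the Python references alone does not return VRAM to the rest of the system, because PyTorch keeps freed blocks in its caching allocator. A minimal standalone sketch of the same sequence, with a plain torch.nn.Linear standing in for the embedding model that app.state.ef holds (an illustrative stand-in, not open-webui's actual loader):

import gc

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(4096, 4096).to(device)  # stand-in for the loaded model

model = None  # drop the last reference so the parameters become unreachable
gc.collect()  # collect promptly instead of waiting for the next GC cycle

if torch.cuda.is_available():
    # Freed tensors stay in PyTorch's caching allocator; empty_cache() hands
    # those blocks back to the CUDA driver so other processes (or the next
    # model the user selects) can actually use the VRAM.
    torch.cuda.empty_cache()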
@@ -815,14 +817,16 @@ async def update_rag_config(
         )

     # Reranking settings
-    if request.app.state.config.RAG_RERANKING_ENGINE == '':
+    if request.app.state.config.RAG_RERANKING_ENGINE == "":
         # Unloading the internal reranker and clear VRAM memory
         request.app.state.rf = None
         request.app.state.RERANKING_FUNCTION = None
         import gc
+
         gc.collect()
-        if DEVICE_TYPE == 'cuda':
+        if DEVICE_TYPE == "cuda":
             import torch
+
             if torch.cuda.is_available():
                 torch.cuda.empty_cache()
     request.app.state.config.RAG_RERANKING_ENGINE = (
@@ -854,7 +858,10 @@ async def update_rag_config(
     )

     try:
-        if request.app.state.config.ENABLE_RAG_HYBRID_SEARCH and not request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:
+        if (
+            request.app.state.config.ENABLE_RAG_HYBRID_SEARCH
+            and not request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL
+        ):
             request.app.state.rf = get_rf(
                 request.app.state.config.RAG_RERANKING_ENGINE,
                 request.app.state.config.RAG_RERANKING_MODEL,
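All four hunks read as mechanical style fixes rather than behavior changes: single quotes normalized to double quotes, a blank line inserted after function-local imports, and over-long if conditions wrapped in parentheses. This matches what the Black formatter produces under its default 88-column style, though the tool is not named in the diff, so treat the attribution as an inference from the "Formatting" commit message. A quick way to check the condition-wrapping with Black's Python API:

import black

# The "-" side of the first hunk, reduced to a syntactically complete snippet.
old = (
    "if app.state.config.ENABLE_RAG_HYBRID_SEARCH and not "
    "app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:\n"
    "    pass\n"
)

# format_str applies Black's default style; the long condition comes back
# parenthesized across multiple lines, matching the "+" side of the hunk.
print(black.format_str(old, mode=black.Mode()))

The blank line after the nested import gc / import torch statements is likewise something recent Black versions enforce, so the whole commit appears reproducible by running the formatter over the two files.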