From 48aad6551455e8595b2820b68bc0c921ff96ea98 Mon Sep 17 00:00:00 2001
From: "Timothy J. Baek"
Date: Tue, 9 Apr 2024 23:54:20 -0700
Subject: [PATCH] refac

---
 backend/config.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/backend/config.py b/backend/config.py
index 364f498e38..81582ab510 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -397,9 +397,10 @@ CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
 # this uses the model defined in the Dockerfile ENV variable. If you dont use docker or docker based deployments such as k8s, the default embedding model will be used (all-MiniLM-L6-v2)
 RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
 log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL}"),
-RAG_EMBEDDING_MODEL_AUTO_UPDATE = False
-if os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true":
-    RAG_EMBEDDING_MODEL_AUTO_UPDATE = True
+
+RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
+    os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
+)
 # device type ebbeding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
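
Note: the refactor above only changes how the RAG_EMBEDDING_MODEL_AUTO_UPDATE flag is derived from the environment; the resulting behavior should be identical. A minimal standalone sketch of that pattern, runnable outside config.py (the example environment value below is hypothetical, not part of the patch):

import os

# Hypothetical example value; in a real deployment this comes from the container/env.
os.environ["RAG_EMBEDDING_MODEL_AUTO_UPDATE"] = "True"

# Same single-expression pattern as the patched config.py:
# unset or anything other than "true" (case-insensitive) -> False, "true"/"True"/"TRUE" -> True.
RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
    os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
)

print(RAG_EMBEDDING_MODEL_AUTO_UPDATE)  # True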