diff --git a/backend/open_webui/utils/embeddings.py b/backend/open_webui/utils/embeddings.py
index 781fb90bd1..c3fae66022 100644
--- a/backend/open_webui/utils/embeddings.py
+++ b/backend/open_webui/utils/embeddings.py
@@ -15,7 +15,7 @@ from open_webui.routers.pipelines import process_pipeline_inlet_filter
 
 from open_webui.utils.payload import convert_embedding_payload_openai_to_ollama
-from open_webui.utils.response import convert_response_ollama_to_openai
+from open_webui.utils.response import convert_embedding_response_ollama_to_openai
 
 logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
 log = logging.getLogger(__name__)
@@ -117,7 +117,7 @@ async def generate_embeddings(
             form_data=form_obj,
             user=user,
         )
-        return convert_response_ollama_to_openai(response)
+        return convert_embedding_response_ollama_to_openai(response)
 
     # Default: OpenAI or compatible backend
     return await openai_embeddings(
diff --git a/backend/open_webui/utils/response.py b/backend/open_webui/utils/response.py
index 59deb726e0..b454325d8a 100644
--- a/backend/open_webui/utils/response.py
+++ b/backend/open_webui/utils/response.py
@@ -126,7 +126,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
         yield "data: [DONE]\n\n"
 
 
-def convert_response_ollama_to_openai(response):
+def convert_embedding_response_ollama_to_openai(response) -> dict:
     """
     Convert the response from Ollama embeddings endpoint to the OpenAI-compatible format.