fix: openai response propagation issue

commit 059cc636f6
parent b581536a66

1 changed file with 85 additions and 72 deletions
@@ -19,7 +19,7 @@ from concurrent.futures import ThreadPoolExecutor
 from fastapi import Request, HTTPException
-from starlette.responses import Response, StreamingResponse
+from starlette.responses import Response, StreamingResponse, JSONResponse
 
 
 from open_webui.models.chats import Chats
@@ -1254,8 +1254,13 @@ async def process_chat_response(
     # Non-streaming response
     if not isinstance(response, StreamingResponse):
         if event_emitter:
-            if "error" in response:
-                error = response["error"].get("detail", response["error"])
+            if isinstance(response, dict) or isinstance(response, JSONResponse):
+                response_data = (
+                    response if isinstance(response, dict) else response.content
+                )
+
+            if "error" in response_data:
+                error = response_data["error"].get("detail", response_data["error"])
                 Chats.upsert_message_to_chat_by_id_and_message_id(
                     metadata["chat_id"],
                     metadata["message_id"],
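The hunk above introduces the core normalization: a non-streaming completion may now arrive either as a plain dict or as a Starlette JSONResponse, and the handler extracts a single response_data payload to inspect. A minimal standalone sketch of that idea (the helper name extract_payload is illustrative only; note that a plain Starlette JSONResponse keeps its rendered payload as bytes on .body, so the sketch decodes that):

import json

from starlette.responses import JSONResponse


def extract_payload(response):
    """Normalize a completion response to a plain dict payload."""
    if isinstance(response, dict):
        return response
    if isinstance(response, JSONResponse):
        # Starlette renders content to bytes at construction time
        # and stores it on .body; json.loads accepts bytes directly.
        return json.loads(response.body)
    raise TypeError(f"unexpected response type: {type(response)!r}")


# Both shapes normalize to the same payload.
payload = {"choices": [{"message": {"content": "hi"}}]}
assert extract_payload(payload) == payload
assert extract_payload(JSONResponse(content=payload)) == payload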
@@ -1264,25 +1269,24 @@ async def process_chat_response(
                     },
                 )
 
-            if "selected_model_id" in response:
+            if "selected_model_id" in response_data:
                 Chats.upsert_message_to_chat_by_id_and_message_id(
                     metadata["chat_id"],
                     metadata["message_id"],
                     {
-                        "selectedModelId": response["selected_model_id"],
+                        "selectedModelId": response_data["selected_model_id"],
                     },
                 )
 
-            choices = response.get("choices", [])
+            choices = response_data.get("choices", [])
             if choices and choices[0].get("message", {}).get("content"):
-                content = response["choices"][0]["message"]["content"]
+                content = response_data["choices"][0]["message"]["content"]
 
                 if content:
-
                     await event_emitter(
                         {
                             "type": "chat:completion",
-                            "data": response,
+                            "data": response_data,
                         }
                     )
 
@@ -1327,7 +1331,7 @@ async def process_chat_response(
 
             await background_tasks_handler()
 
-            if events and isinstance(events, list) and isinstance(response, dict):
+            if events and isinstance(events, list):
                 extra_response = {}
                 for event in events:
                     if isinstance(event, dict):
@@ -1335,11 +1339,20 @@ async def process_chat_response(
                     else:
                         extra_response[event] = True
 
-                response = {
+                response_data = {
                     **extra_response,
-                    **response,
+                    **response_data,
                 }
 
+                if isinstance(response, JSONResponse):
+                    response = JSONResponse(
+                        content=response_data,
+                        headers=response.headers,
+                        status_code=response.status_code,
+                    )
+                else:
+                    response = response_data
+
         return response
     else:
         if events and isinstance(events, list) and isinstance(response, dict):
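This last hunk carries the actual fix: once event metadata has been merged into the payload, a JSONResponse input is rebuilt as a JSONResponse with its headers and status code propagated, rather than silently downgraded to a bare dict. A sketch of the rewrap step under the same assumptions as above (merge_events_and_rewrap is a hypothetical helper; Content-Length is dropped when copying headers so Starlette can recompute it for the new body):

import json

from starlette.responses import JSONResponse


def merge_events_and_rewrap(response, events):
    """Fold event metadata into the payload without losing the response type."""
    extra_response = {}
    for event in events:
        if isinstance(event, dict):
            extra_response.update(event)
        else:
            # Non-dict events become truthy flags, mirroring the
            # extra_response loop in the diff above.
            extra_response[event] = True

    payload = response if isinstance(response, dict) else json.loads(response.body)
    merged = {**extra_response, **payload}

    if isinstance(response, JSONResponse):
        # Preserve the original headers and status code, minus
        # Content-Length, which Starlette recomputes for the new body.
        headers = {
            k: v for k, v in response.headers.items() if k.lower() != "content-length"
        }
        return JSONResponse(
            content=merged, headers=headers, status_code=response.status_code
        )
    return merged


# Example: event metadata ends up beside the original payload.
wrapped = JSONResponse(content={"choices": []}, status_code=200)
out = merge_events_and_rewrap(wrapped, [{"selected_model_id": "gpt-4o"}])
assert json.loads(out.body) == {"selected_model_id": "gpt-4o", "choices": []}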