From 0c40d93da4f5463844235df6c832d159030d7b0c Mon Sep 17 00:00:00 2001
From: Jeremy Mumford
Date: Fri, 8 Aug 2025 14:16:28 -0600
Subject: [PATCH 1/2] renamed and added gpt-5 to reflect OpenAI updates

---
 backend/open_webui/routers/openai.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 5b54796a70..62b6dedf0a 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -95,7 +95,7 @@ async def cleanup_response(
         await session.close()
 
 
-def openai_o_series_handler(payload):
+def openai_reasoning_model_handler(payload):
     """
     Handle "o" series specific parameters
     """
@@ -788,9 +788,9 @@ async def generate_chat_completion(
     key = request.app.state.config.OPENAI_API_KEYS[idx]
 
     # Check if model is from "o" series
-    is_o_series = payload["model"].lower().startswith(("o1", "o3", "o4"))
-    if is_o_series:
-        payload = openai_o_series_handler(payload)
+    is_reasoning_model = payload["model"].lower().startswith(("o1", "o3", "o4", "gpt-5"))
+    if is_reasoning_model:
+        payload = openai_reasoning_model_handler(payload)
     elif "api.openai.com" not in url:
         # Remove "max_completion_tokens" from the payload for backward compatibility
         if "max_completion_tokens" in payload:

From c69f2cc7760d7a7f723c8999c44e0322b2105509 Mon Sep 17 00:00:00 2001
From: Jeremy Mumford
Date: Fri, 8 Aug 2025 14:20:14 -0600
Subject: [PATCH 2/2] updated comments

---
 backend/open_webui/routers/openai.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py
index 62b6dedf0a..5dc8458265 100644
--- a/backend/open_webui/routers/openai.py
+++ b/backend/open_webui/routers/openai.py
@@ -97,10 +97,10 @@ async def cleanup_response(
 
 def openai_reasoning_model_handler(payload):
     """
-    Handle "o" series specific parameters
+    Handle reasoning model specific parameters
     """
     if "max_tokens" in payload:
-        # Convert "max_tokens" to "max_completion_tokens" for all o-series models
+        # Convert "max_tokens" to "max_completion_tokens" for all reasoning models
         payload["max_completion_tokens"] = payload["max_tokens"]
         del payload["max_tokens"]
 
@@ -787,7 +787,7 @@ async def generate_chat_completion(
     url = request.app.state.config.OPENAI_API_BASE_URLS[idx]
     key = request.app.state.config.OPENAI_API_KEYS[idx]
 
-    # Check if model is from "o" series
+    # Check if model is a reasoning model that needs special handling
     is_reasoning_model = payload["model"].lower().startswith(("o1", "o3", "o4", "gpt-5"))
     if is_reasoning_model:
         payload = openai_reasoning_model_handler(payload)
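
A minimal standalone sketch of the behavior these two patches produce at request time,
assuming the handler only rewrites the token field as shown in the hunks above; the
sample payload, the return statement, and the surrounding scaffolding are illustrative
and not part of the actual module:

    # Hypothetical sketch; the real function lives in backend/open_webui/routers/openai.py.
    def openai_reasoning_model_handler(payload: dict) -> dict:
        # Reasoning models expect "max_completion_tokens" rather than "max_tokens".
        if "max_tokens" in payload:
            payload["max_completion_tokens"] = payload["max_tokens"]
            del payload["max_tokens"]
        return payload

    payload = {"model": "gpt-5", "max_tokens": 1024}
    # The prefix check now covers o1/o3/o4 as well as the gpt-5 family.
    if payload["model"].lower().startswith(("o1", "o3", "o4", "gpt-5")):
        payload = openai_reasoning_model_handler(payload)
    # payload is now {"model": "gpt-5", "max_completion_tokens": 1024}

Because the check is a simple startswith on the lowercased model name, any model id
beginning with one of those prefixes (for example "gpt-5-mini") takes the same path.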