feat: add support for gpt-5 model and update configuration

mrT23 2025-08-08 08:28:42 +03:00
parent 6be8860959
commit 5162d847b3
4 changed files with 25 additions and 3 deletions

@@ -28,6 +28,8 @@ MAX_TOKENS = {
     'gpt-4.1-mini-2025-04-14': 1047576,
     'gpt-4.1-nano': 1047576,
     'gpt-4.1-nano-2025-04-14': 1047576,
+    'gpt-5': 200000,
+    'gpt-5-2025-08-07': 200000,
     'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
     'o1-mini-2024-09-12': 128000, # 128K, but may be limited by config.max_model_tokens
     'o1-preview': 128000, # 128K, but may be limited by config.max_model_tokens
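
The new entries give both gpt-5 aliases a 200,000-token limit in the MAX_TOKENS table. A minimal sketch of how such a lookup behaves (the lookup_limit helper and the user_cap parameter below are illustrative assumptions, not code from this commit):

# Illustrative sketch only, not part of the commit: reading the new gpt-5
# limits from a MAX_TOKENS table like the one patched above.
from typing import Optional

MAX_TOKENS = {
    'gpt-5': 200000,
    'gpt-5-2025-08-07': 200000,
}

def lookup_limit(model: str, user_cap: Optional[int] = None) -> int:
    # Hypothetical helper: clamp to a user-configured cap when one is set.
    limit = MAX_TOKENS[model]
    return min(limit, user_cap) if user_cap is not None else limit

print(lookup_limit('gpt-5-2025-08-07'))        # 200000
print(lookup_limit('gpt-5', user_cap=100000))  # 100000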

@@ -288,6 +288,21 @@ class LiteLLMAIHandler(BaseAiHandler):
                 messages[1]["content"] = [{"type": "text", "text": messages[1]["content"]},
                                           {"type": "image_url", "image_url": {"url": img_path}}]
+            thinking_kwargs_gpt5 = None
+            if model.startswith('gpt-5'):
+                if model.endswith('_thinking'):
+                    thinking_kwargs_gpt5 = {
+                        "reasoning_effort": 'low',
+                        "allowed_openai_params": ["reasoning_effort"],
+                    }
+                else:
+                    thinking_kwargs_gpt5 = {
+                        "reasoning_effort": 'minimal',
+                        "allowed_openai_params": ["reasoning_effort"],
+                    }
+                model = model.replace('_thinking', '') # remove _thinking suffix
             # Currently, some models do not support a separate system and user prompts
             if model in self.user_message_only_models or get_settings().config.custom_reasoning_model:
                 user = f"{system}\n\n\n{user}"
@@ -310,6 +325,11 @@ class LiteLLMAIHandler(BaseAiHandler):
                 "api_base": self.api_base,
             }
+            if thinking_kwargs_gpt5:
+                kwargs.update(thinking_kwargs_gpt5)
+                if 'temperature' in kwargs:
+                    del kwargs['temperature']
             # Add temperature only if model supports it
             if model not in self.no_support_temperature_models and not get_settings().config.custom_reasoning_model:
                 # get_logger().info(f"Adding temperature with value {temperature} to model {model}.")
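
The handler addition keys off an optional '_thinking' suffix on the model name: with the suffix, reasoning_effort is set to 'low'; without it, to 'minimal'; the suffix is stripped before the request is sent, and temperature is dropped from kwargs whenever the reasoning settings are applied. A standalone sketch of that mapping (build_gpt5_kwargs is a hypothetical helper, not the handler's actual method):

# Sketch only: mirrors the suffix handling added above, outside the handler class.
def build_gpt5_kwargs(model: str):
    thinking_kwargs = {}
    if model.startswith('gpt-5'):
        effort = 'low' if model.endswith('_thinking') else 'minimal'
        thinking_kwargs = {
            "reasoning_effort": effort,
            "allowed_openai_params": ["reasoning_effort"],
        }
        model = model.replace('_thinking', '')  # strip the suffix before the API call
    return model, thinking_kwargs

model, extra = build_gpt5_kwargs('gpt-5-2025-08-07_thinking')
print(model)  # gpt-5-2025-08-07
print(extra)  # {'reasoning_effort': 'low', 'allowed_openai_params': ['reasoning_effort']}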

@@ -6,8 +6,8 @@
 [config]
 # models
-model="o4-mini"
-fallback_models=["gpt-4.1"]
+model="gpt-5-2025-08-07"
+fallback_models=["o4-mini"]
 #model_reasoning="o4-mini" # dedicated reasoning model for self-reflection
 #model_weak="gpt-4o" # optional, a weaker model to use for some easier tasks
 # CLI
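
The default model moves from o4-mini to gpt-5-2025-08-07, with o4-mini kept as the fallback. A minimal sketch of reading these values, assuming pr-agent's existing get_settings() loader (the same one used in the handler change above):

# Sketch only, assuming pr-agent's settings loader: read the defaults set in configuration.toml.
from pr_agent.config_loader import get_settings

settings = get_settings()
print(settings.config.model)            # "gpt-5-2025-08-07"
print(settings.config.fallback_models)  # ["o4-mini"]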

@@ -13,7 +13,7 @@ google-cloud-aiplatform==1.38.0
 google-generativeai==0.8.3
 google-cloud-storage==2.10.0
 Jinja2==3.1.2
-litellm==1.70.4
+litellm==1.75.2
 loguru==0.7.2
 msrest==0.7.1
 openai>=1.55.3