Mirror of https://github.com/qodo-ai/pr-agent.git, synced 2025-12-12 02:45:18 +00:00
Merge pull request #1986 from qodo-ai/tr/revert_litellm
fix: update model prefix in litellm_ai_handler and adjust dependencie…
Commit 4958decc89
4 changed files with 13 additions and 2 deletions
@@ -72,6 +72,10 @@ You can receive automatic feedback from Qodo Merge on your local IDE after each

## News and Updates

## Aug 8, 2025

Added full support for GPT-5 models. View the [benchmark results](https://qodo-merge-docs.qodo.ai/pr_benchmark/#pr-benchmark-results) for details on the performance of GPT-5 models in PR-Agent.

## Jul 1, 2025

You can now receive automatic feedback from Qodo Merge in your local IDE after each commit. Read more about it [here](https://github.com/qodo-ai/agents/tree/main/agents/qodo-merge-post-commit).
@@ -28,6 +28,8 @@ MAX_TOKENS = {
    'gpt-4.1-mini-2025-04-14': 1047576,
    'gpt-4.1-nano': 1047576,
    'gpt-4.1-nano-2025-04-14': 1047576,
    'gpt-5-nano': 200000, # 200K, but may be limited by config.max_model_tokens
    'gpt-5-mini': 200000, # 200K, but may be limited by config.max_model_tokens
    'gpt-5': 200000,
    'gpt-5-2025-08-07': 200000,
    'o1-mini': 128000, # 128K, but may be limited by config.max_model_tokens
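Several of the new entries carry the comment "200K, but may be limited by config.max_model_tokens". The snippet below is a minimal, self-contained sketch of that clamping pattern: the MAX_TOKENS excerpt and the `config.max_model_tokens` attribute name come from the hunk above, while `get_model_token_limit` and the `SimpleNamespace` stand-in for the settings object are illustrative, not PR-Agent's actual helpers.

```python
from types import SimpleNamespace

# Excerpt of the per-model context-window table from the hunk above.
MAX_TOKENS = {
    'gpt-5-nano': 200000,
    'gpt-5-mini': 200000,
    'gpt-5': 200000,
    'gpt-5-2025-08-07': 200000,
    'o1-mini': 128000,
}

# Stand-in for the project's settings; only the attribute named in the diff
# comments (config.max_model_tokens) is modeled here.
config = SimpleNamespace(max_model_tokens=32000)

def get_model_token_limit(model: str) -> int:
    """Illustrative helper: return the model's context window, clamped by config.max_model_tokens."""
    limit = MAX_TOKENS.get(model)
    if limit is None:
        raise KeyError(f"unknown model: {model}")
    if config.max_model_tokens:  # treat 0/None as "no cap"
        limit = min(limit, config.max_model_tokens)
    return limit

print(get_model_token_limit('gpt-5'))    # 32000 (clamped from 200000)
print(get_model_token_limit('o1-mini'))  # 32000 (clamped from 128000)
```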
@@ -300,7 +300,7 @@ class LiteLLMAIHandler(BaseAiHandler):
                "reasoning_effort": 'minimal',
                "allowed_openai_params": ["reasoning_effort"],
            }
            model = model.replace('_thinking', '') # remove _thinking suffix
            model = 'openai/'+model.replace('_thinking', '') # remove _thinking suffix

        # Currently, some models do not support a separate system and user prompts
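Per the commit title, the change in this hunk is the model prefix: the assignment that only stripped the `_thinking` suffix becomes one that also prepends LiteLLM's `openai/` provider prefix, while `reasoning_effort` is forwarded through `allowed_openai_params`. The sketch below is an illustrative reconstruction of that name-resolution step only; `resolve_model`, its return shape, and the `endswith('_thinking')` trigger are assumptions, not the handler's real API.

```python
def resolve_model(model: str) -> tuple:
    """Illustrative sketch (not the handler's real API): map a '<name>_thinking'
    alias to a LiteLLM model id plus the extra kwargs shown in the hunk above."""
    extra_args = {}
    if model.endswith('_thinking'):  # assumed trigger; the real handler's condition may differ
        # Mirror the hunk: request minimal reasoning effort, allow LiteLLM to
        # forward it, then strip the suffix and add the provider prefix.
        extra_args = {
            "reasoning_effort": 'minimal',
            "allowed_openai_params": ["reasoning_effort"],
        }
        model = 'openai/' + model.replace('_thinking', '')  # remove _thinking suffix
    return model, extra_args

# Example: the alias 'gpt-5_thinking' resolves to 'openai/gpt-5' plus the extra kwargs.
model, extra = resolve_model('gpt-5_thinking')
print(model)  # openai/gpt-5
print(extra)  # {'reasoning_effort': 'minimal', 'allowed_openai_params': ['reasoning_effort']}
```

In the handler these extra kwargs would presumably be merged into the LiteLLM completion call; the sketch stops before any network call.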
@@ -1,16 +1,19 @@
aiohttp==3.10.2
anthropic>=0.52.0
#anthropic[vertex]==0.47.1
atlassian-python-api==3.41.4
azure-devops==7.1.0b3
azure-identity==1.15.0
boto3==1.33.6
certifi==2024.8.30
dynaconf==3.2.4
fastapi==0.115.6
GitPython==3.1.41
google-cloud-aiplatform==1.38.0
google-generativeai==0.8.3
google-cloud-storage==2.10.0
Jinja2==3.1.2
litellm[proxy]==1.75.2
litellm==1.73.6
loguru==0.7.2
msrest==0.7.1
openai>=1.55.3
@@ -22,7 +25,9 @@ retry==0.9.2
starlette-context==0.3.6
tiktoken==0.8.0
ujson==5.8.0
uvicorn==0.22.0
tenacity==8.2.3
gunicorn==23.0.0
pytest-cov==5.0.0
pydantic==2.8.2
html2text==2024.2.26
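Both dependency hunks adjust pins; the litellm lines show `litellm[proxy]==1.75.2` alongside `litellm==1.73.6`, and the `tr/revert_litellm` branch name suggests the pin is being reverted to 1.73.6. When a pin like this matters for provider routing, a small startup check can flag an environment that has drifted from requirements.txt. The sketch below is illustrative and not part of PR-Agent; the only value taken from the diff is the 1.73.6 version string.

```python
from importlib.metadata import PackageNotFoundError, version

# One of the version strings shown in the requirements hunk above.
PINNED_LITELLM = "1.73.6"

def check_litellm_pin(expected: str = PINNED_LITELLM) -> None:
    """Warn if the installed litellm does not match the pinned version."""
    try:
        installed = version("litellm")
    except PackageNotFoundError:
        raise SystemExit("litellm is not installed; run: pip install -r requirements.txt")
    if installed != expected:
        print(f"WARNING: litellm {installed} is installed, but the pin is {expected}")

check_litellm_pin()
```

In an environment installed from the pinned requirements this prints nothing; any other litellm version triggers the warning.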