feat: update max context tokens configuration and improve documentation

This commit is contained in:
mrT23 2025-08-27 15:20:10 +03:00
parent 8258c2e774
commit ae4fc71603
No known key found for this signature in database
GPG key ID: D350490E39D5F5AD
3 changed files with 1 addition and 11 deletions

View file

@@ -517,7 +517,7 @@ Qodo Merge uses a dynamic strategy to generate code suggestions based on the siz
#### 1. Chunking large PRs
- Qodo Merge divides large PRs into 'chunks'.
- Each chunk contains up to `pr_code_suggestions.max_context_tokens` tokens (default: 24,000).
- Each chunk contains up to `config.max_model_tokens` tokens (default: 32,000).
#### 2. Generating suggestions

View file

@@ -132,8 +132,6 @@ use_conversation_history=true
[pr_code_suggestions] # /improve #
max_context_tokens=24000
#
commitable_code_suggestions = false
dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
focus_only_on_problems=true

View file

@@ -39,14 +39,6 @@ class PRCodeSuggestions:
self.git_provider.get_languages(), self.git_provider.get_files()
)
# limit context specifically for the improve command, which has hard input to parse:
if get_settings().pr_code_suggestions.max_context_tokens:
MAX_CONTEXT_TOKENS_IMPROVE = get_settings().pr_code_suggestions.max_context_tokens
if get_settings().config.max_model_tokens > MAX_CONTEXT_TOKENS_IMPROVE:
get_logger().info(f"Setting max_model_tokens to {MAX_CONTEXT_TOKENS_IMPROVE} for PR improve")
get_settings().config.max_model_tokens_original = get_settings().config.max_model_tokens
get_settings().config.max_model_tokens = MAX_CONTEXT_TOKENS_IMPROVE
num_code_suggestions = int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk)
self.ai_handler = ai_handler()