From ae4fc716036ce3395e3943addcd13a8beb23d0f3 Mon Sep 17 00:00:00 2001
From: mrT23
Date: Wed, 27 Aug 2025 15:20:10 +0300
Subject: [PATCH] feat: update max context tokens configuration and improve
 documentation

---
 docs/docs/tools/improve.md            | 2 +-
 pr_agent/settings/configuration.toml  | 2 --
 pr_agent/tools/pr_code_suggestions.py | 8 --------
 3 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/docs/docs/tools/improve.md b/docs/docs/tools/improve.md
index f4acf488..fc07cf85 100644
--- a/docs/docs/tools/improve.md
+++ b/docs/docs/tools/improve.md
@@ -517,7 +517,7 @@ Qodo Merge uses a dynamic strategy to generate code suggestions based on the siz
 
 #### 1. Chunking large PRs
 
 - Qodo Merge divides large PRs into 'chunks'.
-- Each chunk contains up to `pr_code_suggestions.max_context_tokens` tokens (default: 24,000).
+- Each chunk contains up to `config.max_model_tokens` tokens (default: 32,000).
 
 #### 2. Generating suggestions
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index 1fee37a5..325400d6 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -132,8 +132,6 @@ use_conversation_history=true
 
 [pr_code_suggestions] # /improve #
-max_context_tokens=24000
-#
 commitable_code_suggestions = false
 dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
 focus_only_on_problems=true
 
diff --git a/pr_agent/tools/pr_code_suggestions.py b/pr_agent/tools/pr_code_suggestions.py
index e0d88ede..30292074 100644
--- a/pr_agent/tools/pr_code_suggestions.py
+++ b/pr_agent/tools/pr_code_suggestions.py
@@ -39,14 +39,6 @@ class PRCodeSuggestions:
             self.git_provider.get_languages(), self.git_provider.get_files()
         )
 
-        # limit context specifically for the improve command, which has hard input to parse:
-        if get_settings().pr_code_suggestions.max_context_tokens:
-            MAX_CONTEXT_TOKENS_IMPROVE = get_settings().pr_code_suggestions.max_context_tokens
-            if get_settings().config.max_model_tokens > MAX_CONTEXT_TOKENS_IMPROVE:
-                get_logger().info(f"Setting max_model_tokens to {MAX_CONTEXT_TOKENS_IMPROVE} for PR improve")
-                get_settings().config.max_model_tokens_original = get_settings().config.max_model_tokens
-                get_settings().config.max_model_tokens = MAX_CONTEXT_TOKENS_IMPROVE
-
         num_code_suggestions = int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk)
         self.ai_handler = ai_handler()
 