mirror of https://github.com/qodo-ai/pr-agent.git
synced 2025-12-12 10:55:17 +00:00

feat: update max context tokens configuration and improve documentation

parent 8258c2e774
commit ae4fc71603

3 changed files with 1 addition and 11 deletions
@@ -517,7 +517,7 @@ Qodo Merge uses a dynamic strategy to generate code suggestions based on the size
 #### 1. Chunking large PRs

 - Qodo Merge divides large PRs into 'chunks'.
-- Each chunk contains up to `pr_code_suggestions.max_context_tokens` tokens (default: 24,000).
+- Each chunk contains up to `config.max_model_tokens` tokens (default: 32,000).

 #### 2. Generating suggestions
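The documentation hunk above now describes chunking purely in terms of the global `config.max_model_tokens` budget. As a rough illustration of that strategy, the sketch below packs per-file diff text into chunks whose estimated size stays under the budget. It is not pr-agent's actual implementation; the `token_count()` helper and the 0.9 safety margin are illustrative assumptions.

def token_count(text: str) -> int:
    return max(1, len(text) // 4)  # crude stand-in for a real model tokenizer

def chunk_diffs(file_diffs: list[str], max_model_tokens: int = 32_000) -> list[list[str]]:
    budget = int(max_model_tokens * 0.9)    # leave headroom for the prompt itself
    chunks: list[list[str]] = [[]]
    used = 0
    for diff in file_diffs:
        cost = token_count(diff)
        if used + cost > budget and chunks[-1]:
            chunks.append([])               # current chunk is full, start a new one
            used = 0
        chunks[-1].append(diff)
        used += cost
    return chunks

Each resulting chunk is then sent to the model separately, so larger values of `config.max_model_tokens` mean fewer, bigger chunks.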
@@ -132,8 +132,6 @@ use_conversation_history=true


 [pr_code_suggestions] # /improve #
-max_context_tokens=24000
-#
 commitable_code_suggestions = false
 dual_publishing_score_threshold=-1 # -1 to disable, [0-10] to set the threshold (>=) for publishing a code suggestion both in a table and as commitable
 focus_only_on_problems=true
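With `max_context_tokens` removed from the `[pr_code_suggestions]` section, the only knob bounding the /improve context is the global `config.max_model_tokens`. A hedged sketch of reading and overriding that value through the settings object, mirroring the accessors visible in the diff; the import path and the dotted-key `.set(...)` call are assumptions about pr-agent's settings loader.

from pr_agent.config_loader import get_settings

settings = get_settings()
print(settings.config.max_model_tokens)         # global token budget, e.g. 32000
settings.set("config.max_model_tokens", 16000)  # assumption: tighten the budget for all tools at once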
@@ -39,14 +39,6 @@ class PRCodeSuggestions:
             self.git_provider.get_languages(), self.git_provider.get_files()
         )

-        # limit context specifically for the improve command, which has hard input to parse:
-        if get_settings().pr_code_suggestions.max_context_tokens:
-            MAX_CONTEXT_TOKENS_IMPROVE = get_settings().pr_code_suggestions.max_context_tokens
-            if get_settings().config.max_model_tokens > MAX_CONTEXT_TOKENS_IMPROVE:
-                get_logger().info(f"Setting max_model_tokens to {MAX_CONTEXT_TOKENS_IMPROVE} for PR improve")
-                get_settings().config.max_model_tokens_original = get_settings().config.max_model_tokens
-                get_settings().config.max_model_tokens = MAX_CONTEXT_TOKENS_IMPROVE
-
         num_code_suggestions = int(get_settings().pr_code_suggestions.num_code_suggestions_per_chunk)

         self.ai_handler = ai_handler()
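The deleted block in PRCodeSuggestions.__init__ implemented a clamp-and-restore pattern: when `pr_code_suggestions.max_context_tokens` was set and smaller than the global limit, it lowered `config.max_model_tokens` for the /improve run and stashed the previous value in `max_model_tokens_original`. A generic sketch of that pattern follows; it is not the project's code, and the context-manager wrapper and the stand-in `settings` object are illustrative assumptions.

from contextlib import contextmanager
from types import SimpleNamespace

@contextmanager
def clamped_model_tokens(settings, limit: int):
    # remember the original budget and lower it only if it exceeds the limit
    original = settings.config.max_model_tokens
    if original > limit:
        settings.config.max_model_tokens = limit
    try:
        yield settings
    finally:
        settings.config.max_model_tokens = original  # restore after the run

# usage with a stand-in settings object
settings = SimpleNamespace(config=SimpleNamespace(max_model_tokens=32_000))
with clamped_model_tokens(settings, 24_000):
    assert settings.config.max_model_tokens == 24_000
assert settings.config.max_model_tokens == 32_000

After this commit the clamp is gone: /improve simply respects the global `config.max_model_tokens` value, matching the updated documentation and configuration above.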