From 5fc466bfbcccc852f6778beb94b62b417afd76e3 Mon Sep 17 00:00:00 2001
From: Alessio <148966056+alessio-locatelli@users.noreply.github.com>
Date: Fri, 22 Aug 2025 09:53:08 +0300
Subject: [PATCH] PR reviewer tool: add an opt-in work time estimation (#2006)
* feat: add `ContributionTimeCostEstimate`
* docs: mention `require_estimate_contribution_time_cost` for `reviewer`
* feat: implement time cost estimate for `reviewer`
* test: cover non-GFM output, to ensure parity and prevent regressions in plain Markdown rendering
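
The new option defaults to `false`, so the estimate only appears when explicitly enabled. A minimal sketch of opting in at runtime, assuming the dynaconf-backed settings object used throughout the codebase (the alternative is simply flipping the flag under `[pr_reviewer]` in configuration.toml):

```python
from pr_agent.config_loader import get_settings

# Enable the opt-in estimate before running the reviewer; equivalent to setting
# require_estimate_contribution_time_cost=true under [pr_reviewer] in configuration.toml.
get_settings().set("pr_reviewer.require_estimate_contribution_time_cost", True)
```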
---
docs/docs/tools/review.md | 4 +++
pr_agent/algo/utils.py | 11 ++++++-
pr_agent/settings/configuration.toml | 1 +
pr_agent/settings/pr_reviewer_prompts.toml | 29 ++++++++++++++++++
pr_agent/tools/pr_reviewer.py | 1 +
tests/unittest/test_convert_to_markdown.py | 34 ++++++++++++++++++++++
6 files changed, 79 insertions(+), 1 deletion(-)
diff --git a/docs/docs/tools/review.md b/docs/docs/tools/review.md
index 7665202a..7ef46d83 100644
--- a/docs/docs/tools/review.md
+++ b/docs/docs/tools/review.md
@@ -91,6 +91,10 @@ extra_instructions = "..."
"
@@ -1465,4 +1474,4 @@ def format_todo_items(value: list[TodoItem] | TodoItem, git_provider, gfm_suppor
markdown_text += f"- {format_todo_item(todo_item, git_provider, gfm_supported)}\n"
else:
markdown_text += f"- {format_todo_item(value, git_provider, gfm_supported)}\n"
- return markdown_text
\ No newline at end of file
+ return markdown_text
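
The diffstat records roughly ten added lines in `pr_agent/algo/utils.py`, but the hunk that actually renders the new field is not reproduced above; only the trailing-newline fix to `format_todo_items` is shown. A rough sketch of what the `convert_to_markdown_v2` branch could look like, inferred from the strings the new test asserts (the helper name `emit_time_cost_estimate` and its exact placement are assumptions, not the real patch):

```python
def emit_time_cost_estimate(estimate: dict, gfm_supported: bool) -> str:
    # Sketch only: formats the optional contribution_time_cost_estimate block.
    best = str(estimate.get("best_case", "")).strip()
    average = str(estimate.get("average_case", "")).strip()
    worst = str(estimate.get("worst_case", "")).strip()
    line = f"⏳ Contribution time estimate (best, average, worst case): {best} | {average} | {worst}"
    if gfm_supported:
        return f"| {line} |\n"   # one row of the review table
    return f"### {line}\n\n"     # plain-Markdown fallback, matching the non-GFM test expectation
```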
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index 86ea58dd..1fee37a5 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -79,6 +79,7 @@ require_tests_review=true
require_estimate_effort_to_review=true
require_can_be_split_review=false
require_security_review=true
+require_estimate_contribution_time_cost=false
require_todo_scan=false
require_ticket_analysis_review=true
# general options
diff --git a/pr_agent/settings/pr_reviewer_prompts.toml b/pr_agent/settings/pr_reviewer_prompts.toml
index 16ac68c0..a2f2d8a8 100644
--- a/pr_agent/settings/pr_reviewer_prompts.toml
+++ b/pr_agent/settings/pr_reviewer_prompts.toml
@@ -89,6 +89,14 @@ class TicketCompliance(BaseModel):
requires_further_human_verification: str = Field(description="Bullet-point list of items from the 'ticket_requirements' section above that cannot be assessed through code review alone, are unclear, or need further human review (e.g., browser testing, UI checks). Leave empty if all 'ticket_requirements' were marked as fully compliant or not compliant")
{%- endif %}
+{%- if require_estimate_contribution_time_cost %}
+
+class ContributionTimeCostEstimate(BaseModel):
+ best_case: str = Field(description="Estimated time for an expert in the relevant technology stack, assuming no unforeseen issues or bugs arise during the work.", examples=["45m", "5h", "30h"])
+ average_case: str = Field(description="Estimated time for a senior developer with only brief familiarity with this specific technology stack, assuming no major unforeseen issues.", examples=["45m", "5h", "30h"])
+ worst_case: str = Field(description="Estimated time for a senior developer with no prior experience in this specific technology stack, who needs significant time for research, debugging, or resolving unexpected errors.", examples=["45m", "5h", "30h"])
+{%- endif %}
+
class Review(BaseModel):
{%- if related_tickets %}
ticket_compliance_check: List[TicketCompliance] = Field(description="A list of compliance checks for the related tickets")
@@ -96,6 +104,9 @@ class Review(BaseModel):
{%- if require_estimate_effort_to_review %}
estimated_effort_to_review_[1-5]: int = Field(description="Estimate, on a scale of 1-5 (inclusive), the time and effort required to review this PR by an experienced and knowledgeable developer. 1 means short and easy review , 5 means long and hard review. Take into account the size, complexity, quality, and the needed changes of the PR code diff.")
{%- endif %}
+{%- if require_estimate_contribution_time_cost %}
+ contribution_time_cost_estimate: ContributionTimeCostEstimate = Field(description="An estimate of the time required to implement the changes, based on the quantity, quality, and complexity of the contribution, as well as the context from the PR description and commit messages.")
+{%- endif %}
{%- if require_score %}
score: str = Field(description="Rate this PR on a scale of 0-100 (inclusive), where 0 means the worst possible PR code, and 100 means PR code of the highest quality, without any bugs or performance issues, that is ready to be merged immediately and run in production at scale.")
{%- endif %}
@@ -170,6 +181,15 @@ review:
title: ...
- ...
{%- endif %}
+{%- if require_estimate_contribution_time_cost %}
+ contribution_time_cost_estimate:
+ best_case: |
+ ...
+ average_case: |
+ ...
+ worst_case: |
+ ...
+{%- endif %}
```
Answer should be a valid YAML, and nothing else. Each YAML output MUST be after a newline, with proper indent, and block scalar indicator ('|')
@@ -299,6 +319,15 @@ review:
title: ...
- ...
{%- endif %}
+{%- if require_estimate_contribution_time_cost %}
+ contribution_time_cost_estimate:
+ best_case: |
+ ...
+ average_case: |
+ ...
+ worst_case: |
+ ...
+{%- endif %}
```
(replace '...' with the actual values)
{%- endif %}
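
For context on how the new prompt block is consumed: once the model fills it in, the reply is parsed into a nested dict and handed to the markdown renderer. A minimal illustration of that round trip with invented values, using plain `yaml.safe_load` as a stand-in for the project's own YAML loading:

```python
import yaml

from pr_agent.algo.utils import convert_to_markdown_v2

# Hypothetical model reply, trimmed to the new field; real replies contain the full review schema.
model_reply = """\
review:
  contribution_time_cost_estimate:
    best_case: |
      45m
    average_case: |
      5h
    worst_case: |
      30h
"""

data = yaml.safe_load(model_reply)  # note: block scalars ('|') keep a trailing newline
print(convert_to_markdown_v2(data))                       # GFM (table) rendering
print(convert_to_markdown_v2(data, gfm_supported=False))  # plain-Markdown rendering
```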
diff --git a/pr_agent/tools/pr_reviewer.py b/pr_agent/tools/pr_reviewer.py
index d3004bf4..c4917f35 100644
--- a/pr_agent/tools/pr_reviewer.py
+++ b/pr_agent/tools/pr_reviewer.py
@@ -85,6 +85,7 @@ class PRReviewer:
"require_score": get_settings().pr_reviewer.require_score_review,
"require_tests": get_settings().pr_reviewer.require_tests_review,
"require_estimate_effort_to_review": get_settings().pr_reviewer.require_estimate_effort_to_review,
+ "require_estimate_contribution_time_cost": get_settings().pr_reviewer.require_estimate_contribution_time_cost,
'require_can_be_split_review': get_settings().pr_reviewer.require_can_be_split_review,
'require_security_review': get_settings().pr_reviewer.require_security_review,
'require_todo_scan': get_settings().pr_reviewer.get("require_todo_scan", False),
diff --git a/tests/unittest/test_convert_to_markdown.py b/tests/unittest/test_convert_to_markdown.py
index 187ea4a8..0d18e03c 100644
--- a/tests/unittest/test_convert_to_markdown.py
+++ b/tests/unittest/test_convert_to_markdown.py
@@ -222,6 +222,40 @@ class TestConvertToMarkdown:
assert convert_to_markdown_v2(input_data).strip() == expected_output.strip()
+ def test_contribution_time_cost_estimate(self):
+ input_data = {
+ 'review': {
+ 'contribution_time_cost_estimate': {
+ 'best_case': '1h',
+ 'average_case': '2h',
+ 'worst_case': '30m',
+ }
+ }
+ }
+
+ expected_output = textwrap.dedent(f"""
+ {PRReviewHeader.REGULAR.value} 🔍
+
+ Here are some key observations to aid the review process:
+
+
+ | ⏳ Contribution time estimate (best, average, worst case): 1h | 2h | 30 minutes |
+
+ """)
+ assert convert_to_markdown_v2(input_data).strip() == expected_output.strip()
+
+ # Non-GFM branch
+ expected_output_no_gfm = textwrap.dedent(f"""
+ {PRReviewHeader.REGULAR.value} 🔍
+
+ Here are some key observations to aid the review process:
+
+ ### ⏳ Contribution time estimate (best, average, worst case): 1h | 2h | 30 minutes
+
+ """)
+ assert convert_to_markdown_v2(input_data, gfm_supported=False).strip() == expected_output_no_gfm.strip()
+
+
# Tests that the function works correctly with an empty dictionary input
def test_empty_dictionary_input(self):
input_data = {}