Merge b57337f854 into d36ad319f7
Commit 8b1f41fe93
5 changed files with 208 additions and 1 deletion
27 README.md

@@ -330,6 +330,33 @@ ___
</div>
<hr>

## Provider-agnostic push outputs and Slack relay

PR-Agent can optionally emit review results to external sinks without calling git provider APIs.
This is disabled by default. To enable and forward to Slack via a lightweight relay:

1) Start the relay (in a separate shell):
   - Set an Incoming Webhook URL for Slack:
     - CMD: set SLACK_WEBHOOK_URL=https://hooks.slack.com/services/TXXXX/BXXXX/XXXXXXXX
     - PS: $env:SLACK_WEBHOOK_URL="https://hooks.slack.com/services/TXXXX/BXXXX/XXXXXXXX"
   - Run:
     uvicorn pr_agent.servers.push_outputs_relay:app --host 0.0.0.0 --port 8000

2) In your repository, configure PR-Agent to emit to the relay by creating .pr_agent.toml:

```
[push_outputs]
enable = true
channels = ["webhook"]
webhook_url = "http://localhost:8000/relay"
presentation = "markdown"
```

Notes:
- This mechanism is provider-agnostic and uses minimal API calls.
- You can also use the "file" channel to append JSONL records locally.
- The relay transforms the generic payload into Slack’s Incoming Webhook schema.
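For a quick sanity check, a hand-built record can be posted straight to the relay before PR-Agent is wired in (a minimal sketch: the `/relay` endpoint and the record fields `type`, `payload`, and `markdown` come from this change, while the payload keys below are illustrative placeholders):

```
import requests

# A hand-built record mimicking what push_outputs() sends over the "webhook" channel
record = {
    "type": "review",
    "timestamp": "2025-01-01T00:00:00Z",
    "payload": {"estimated_effort": 2, "security_concerns": "No"},  # placeholder keys
    "markdown": "## PR Review\n*Estimated effort:* 2\n*Security concerns:* No",
}

# The relay converts this into a Slack Incoming Webhook message
resp = requests.post("http://localhost:8000/relay", json=record, timeout=8)
print(resp.status_code, resp.json())  # expect 200 and {"status": "ok"}
```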

## Try It Now

Try the GPT-5 powered PR-Agent instantly on _your public GitHub repository_. Just mention `@CodiumAI-Agent` and add the desired command in any PR comment. The agent will generate a response based on your command.
@@ -1264,6 +1264,63 @@ def github_action_output(output_data: dict, key_name: str):
    return

# Generic push mechanism to external sinks (provider-agnostic)
# Config section: [push_outputs]
#   enable = false
#   channels = ["stdout"]  # supported: "stdout", "file", "webhook"
#   file_path = "pr-agent-outputs/reviews.jsonl"
#   webhook_url = ""
#   presentation = "markdown"  # reserved for future presentation controls

def push_outputs(message_type: str, payload: dict | None = None, markdown: str | None = None) -> None:
    try:
        cfg = get_settings().get('push_outputs', {}) or {}
        if not cfg.get('enable', False):
            return

        channels = cfg.get('channels', []) or []
        record = {
            "type": message_type,
            "timestamp": datetime.utcnow().isoformat() + "Z",
            "payload": payload or {},
        }
        if markdown is not None:
            record["markdown"] = markdown

        # stdout channel
        if "stdout" in channels:
            try:
                print(json.dumps(record, ensure_ascii=False))
            except Exception:
                # Do not fail the flow if stdout printing fails
                get_logger().warning("Failed to print push_outputs to stdout")

        # file channel (append JSONL)
        if "file" in channels:
            try:
                file_path = cfg.get('file_path', 'pr-agent-outputs/reviews.jsonl')
                folder = os.path.dirname(file_path)
                if folder:
                    os.makedirs(folder, exist_ok=True)
                with open(file_path, 'a', encoding='utf-8') as fh:
                    fh.write(json.dumps(record, ensure_ascii=False) + "\n")
            except Exception as e:
                get_logger().warning(f"Failed to write push_outputs to file: {e}")

        # webhook channel (generic HTTP POST)
        if "webhook" in channels:
            url = cfg.get('webhook_url', '')
            if url:
                try:
                    headers = {'Content-Type': 'application/json'}
                    requests.post(url, data=json.dumps(record), headers=headers, timeout=5)
                except Exception as e:
                    get_logger().warning(f"Failed to POST push_outputs to webhook: {e}")
    except Exception as e:
        get_logger().error(f"push_outputs failed: {e}")
    return

def show_relevant_configurations(relevant_section: str) -> str:
    skip_keys = ['ai_disclaimer', 'ai_disclaimer_title', 'ANALYTICS_FOLDER', 'secret_provider', "skip_keys", "app_id", "redirect",
                 'trial_prefix_message', 'no_eligible_message', 'identity_provider', 'ALLOWED_REPOS','APP_NAME']
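A minimal direct-call sketch of the function above, for experimentation outside a PR run (hedged: it assumes settings can be overridden in-process via `get_settings().set()` with dotted keys, a pattern used elsewhere in the codebase; adapt to however your settings are loaded):

```
from pr_agent.algo.utils import push_outputs
from pr_agent.config_loader import get_settings

# Assumption: in-process overrides via .set() with dotted keys take effect for later .get() calls
get_settings().set("push_outputs.enable", True)
get_settings().set("push_outputs.channels", ["stdout"])

# Prints one JSON record to stdout; by design, push_outputs() never raises
push_outputs("review", payload={"summary": "demo"}, markdown="## PR Review\nLooks good.")
```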
106 pr_agent/servers/push_outputs_relay.py (Normal file)

@@ -0,0 +1,106 @@
"""
|
||||||
|
Provider-agnostic push outputs relay for Slack
|
||||||
|
|
||||||
|
This FastAPI service receives generic PR-Agent push outputs (from [push_outputs]) and relays them
|
||||||
|
as Slack Incoming Webhook messages.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
1) Run the relay (choose one):
|
||||||
|
- uvicorn pr_agent.servers.push_outputs_relay:app --host 0.0.0.0 --port 8000
|
||||||
|
- python -m pr_agent.servers.push_outputs_relay
|
||||||
|
|
||||||
|
2) Configure the destination Slack webhook:
|
||||||
|
- Set environment variable SLACK_WEBHOOK_URL to your Slack Incoming Webhook URL.
|
||||||
|
|
||||||
|
3) Point PR-Agent to the relay:
|
||||||
|
In your configuration (e.g., .pr_agent.toml or central config), enable generic push outputs:
|
||||||
|
|
||||||
|
[push_outputs]
|
||||||
|
enable = true
|
||||||
|
channels = ["webhook"]
|
||||||
|
webhook_url = "http://localhost:8000/relay" # adjust host/port if needed
|
||||||
|
presentation = "markdown"
|
||||||
|
|
||||||
|
Security
|
||||||
|
--------
|
||||||
|
- Keep the relay private or place it behind an auth gateway if exposed externally.
|
||||||
|
- You can also wrap this service with a reverse proxy that enforces authentication and rate limits.
|
||||||
|
|
||||||
|
Notes
|
||||||
|
-----
|
||||||
|
- The relay is intentionally Slack-specific while living outside the provider-agnostic core.
|
||||||
|
- If record['markdown'] is present, it will be used as Slack message text. Otherwise, a JSON fallback
|
||||||
|
is generated from record['payload'].
|
||||||
|
- Slack supports basic Markdown (mrkdwn). Complex HTML/GitGFM sections may not render perfectly.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
import requests
|
||||||
|
from fastapi import FastAPI, HTTPException
|
||||||
|
|
||||||
|
app = FastAPI(title="PR-Agent Push Outputs Relay (Slack)")
|
||||||
|
|
||||||
|
|
||||||
|
def _to_slack_text(record: Dict[str, Any]) -> str:
|
||||||
|
"""
|
||||||
|
Prefer full review markdown; otherwise fallback to a compact JSON of the payload.
|
||||||
|
"""
|
||||||
|
markdown = record.get("markdown")
|
||||||
|
if isinstance(markdown, str) and markdown.strip():
|
||||||
|
return markdown
|
||||||
|
|
||||||
|
payload = record.get("payload") or {}
|
||||||
|
try:
|
||||||
|
return "```\n" + json.dumps(payload, ensure_ascii=False, indent=2) + "\n```"
|
||||||
|
except Exception:
|
||||||
|
return str(payload)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/relay")
|
||||||
|
async def relay(record: Dict[str, Any]):
|
||||||
|
slack_url = os.environ.get("SLACK_WEBHOOK_URL", "").strip()
|
||||||
|
if not slack_url:
|
||||||
|
raise HTTPException(status_code=500, detail="SLACK_WEBHOOK_URL environment variable is not set")
|
||||||
|
|
||||||
|
text = _to_slack_text(record)
|
||||||
|
|
||||||
|
# If using a Slack Workflow "triggers" URL, the workflow expects top-level fields
|
||||||
|
# that match the configured variables in the Workflow (e.g., "markdown", "payload").
|
||||||
|
# Otherwise, for Incoming Webhooks ("services" URL), use the standard {text, mrkdwn}.
|
||||||
|
if "hooks.slack.com/triggers/" in slack_url:
|
||||||
|
body = {
|
||||||
|
# Map our computed text to the workflow variable named "markdown"
|
||||||
|
"markdown": text,
|
||||||
|
# Provide original payload if the workflow defines a variable for it
|
||||||
|
"payload": record.get("payload", {}),
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
body = {
|
||||||
|
"text": text,
|
||||||
|
"mrkdwn": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = requests.post(slack_url, json=body, timeout=8)
|
||||||
|
if resp.status_code >= 300:
|
||||||
|
raise HTTPException(status_code=resp.status_code, detail=f"Slack webhook error: {resp.text}")
|
||||||
|
except HTTPException:
|
||||||
|
raise
|
||||||
|
except Exception as e:
|
||||||
|
raise HTTPException(status_code=502, detail=f"Failed to post to Slack: {e}")
|
||||||
|
|
||||||
|
return {"status": "ok"}
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Allow running directly: python -m pr_agent.servers.push_outputs_relay
|
||||||
|
import uvicorn
|
||||||
|
|
||||||
|
port = int(os.environ.get("PORT", "8000"))
|
||||||
|
uvicorn.run("pr_agent.servers.push_outputs_relay:app", host="0.0.0.0", port=port, reload=False)
|
||||||
|
|
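To exercise the relay's request handling without a real Slack workspace, one option is FastAPI's TestClient with the outgoing HTTP call stubbed out (a hedged sketch, not part of this change; the dummy webhook URL and the patching of `requests.post` are assumptions for local testing only):

```
from unittest import mock

from fastapi.testclient import TestClient

from pr_agent.servers.push_outputs_relay import app

client = TestClient(app)

# Pretend an Incoming Webhook is configured and intercept the outgoing POST
with mock.patch.dict("os.environ", {"SLACK_WEBHOOK_URL": "https://hooks.slack.com/services/T000/B000/XXXX"}), \
     mock.patch("pr_agent.servers.push_outputs_relay.requests.post") as fake_post:
    fake_post.return_value.status_code = 200
    resp = client.post("/relay", json={"type": "review", "payload": {}, "markdown": "## PR Review"})

assert resp.status_code == 200 and resp.json() == {"status": "ok"}
# For a "services" URL the relay sends the standard Incoming Webhook body
assert fake_post.call_args.kwargs["json"] == {"text": "## PR Review", "mrkdwn": True}
```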
@@ -390,3 +390,13 @@ pr_commands = [
    "/review",
    "/improve",
]

# Generic push outputs configuration (disabled by default). This allows emitting PR outputs
# to stdout, a local file, or a generic webhook without calling provider-specific APIs.
# To enable, set enable=true and choose one or more channels.
[push_outputs]
enable = false
channels = ["stdout"]
file_path = "pr-agent-outputs/reviews.jsonl"
webhook_url = ""
presentation = "markdown"
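With the "file" channel enabled, push_outputs() appends one JSON object per line (JSONL) to file_path. A minimal read-back sketch, assuming the default path above:

```
import json

# Default file_path from [push_outputs]; adjust if overridden in your configuration
records_path = "pr-agent-outputs/reviews.jsonl"

with open(records_path, encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Fields written by push_outputs(): type, timestamp, payload, and optionally markdown
        print(record["type"], record["timestamp"], list(record.get("payload", {})))
```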
@@ -15,7 +15,7 @@ from pr_agent.algo.pr_processing import (add_ai_metadata_to_diff_files,
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.algo.utils import (ModelType, PRReviewHeader,
                                 convert_to_markdown_v2, github_action_output,
-                                load_yaml, show_relevant_configurations)
+                                load_yaml, show_relevant_configurations, push_outputs)
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import (get_git_provider,
                                    get_git_provider_with_context)
@@ -270,6 +270,13 @@ class PRReviewer:
        if get_settings().get('config', {}).get('output_relevant_configurations', False):
            markdown_text += show_relevant_configurations(relevant_section='pr_reviewer')

        # Push outputs to optional external channels (stdout/file/webhook) without provider APIs
        try:
            push_outputs("review", payload=data.get('review', {}), markdown=markdown_text)
        except Exception:
            # non-fatal
            pass

        # Add custom labels from the review prediction (effort, security)
        self.set_review_labels(data)
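With the "stdout" channel enabled, each /review run also prints one JSON record that downstream tooling can capture. A sketch for filtering those records out of mixed CLI output (hedged: the invocation below is illustrative; adjust the command and PR URL to your setup):

```
import json
import subprocess

# Illustrative invocation; adjust to however you normally run PR-Agent
proc = subprocess.run(
    ["python", "-m", "pr_agent.cli", "--pr_url", "https://github.com/org/repo/pull/1", "review"],
    capture_output=True, text=True,
)

for line in proc.stdout.splitlines():
    try:
        record = json.loads(line)
    except ValueError:
        continue  # ordinary log line, not a push_outputs record
    if record.get("type") == "review":
        print(record["timestamp"], record.get("payload", {}))
```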