This is an automated email from the ASF dual-hosted git repository.

potiuk pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/airflow.git


The following commit(s) were added to refs/heads/main by this push:
     new b157db8a096 Refactor auto_triage into smaller focused methods (#63347)
b157db8a096 is described below

commit b157db8a0964c866f0bfecffc255b71f6cc3d7ae
Author: Jarek Potiuk <[email protected]>
AuthorDate: Wed Mar 11 15:08:49 2026 +0100

    Refactor auto_triage into smaller focused methods (#63347)
    
    Extract the monolithic auto_triage function body into focused helper
    functions: TriageStats, TriageContext, _execute_triage_action,
    _prompt_and_execute_flagged_pr, _display_pr_overview_table,
    _filter_candidate_prs, _enrich_candidate_details,
    _review_workflow_approval_prs, _review_deterministic_flagged_prs,
    _review_llm_flagged_prs, _review_passing_prs, and
    _display_triage_summary.
    
    Co-authored-by: Claude Opus 4.6 <[email protected]>
---
 .../src/airflow_breeze/commands/pr_commands.py     | 1850 ++++++++++----------
 1 file changed, 928 insertions(+), 922 deletions(-)

diff --git a/dev/breeze/src/airflow_breeze/commands/pr_commands.py 
b/dev/breeze/src/airflow_breeze/commands/pr_commands.py
index 6e8bd6176fa..e542f585a47 100644
--- a/dev/breeze/src/airflow_breeze/commands/pr_commands.py
+++ b/dev/breeze/src/airflow_breeze/commands/pr_commands.py
@@ -20,6 +20,7 @@ import sys
 import time
 from concurrent.futures import ThreadPoolExecutor
 from dataclasses import dataclass
+from typing import TYPE_CHECKING
 
 import click
 from rich.panel import Panel
@@ -39,6 +40,9 @@ from airflow_breeze.utils.custom_param_types import 
NotVerifiedBetterChoice
 from airflow_breeze.utils.run_utils import run_command
 from airflow_breeze.utils.shared_options import get_dry_run, get_verbose
 
+if TYPE_CHECKING:
+    from airflow_breeze.utils.github import PRAssessment
+
 QUALITY_CRITERIA_LINK = (
     "[Pull Request quality 
criteria](https://github.com/apache/airflow/blob/main/";
     "contributing-docs/05_pull_requests.rst#pull-request-quality-criteria)"
@@ -1317,127 +1321,842 @@ def _collect_llm_results(
         llm_assessments[pr.number] = assessment
 
 
-def _fetch_pr_diff(token: str, github_repository: str, pr_number: int) -> str 
| None:
-    """Fetch the diff for a PR via GitHub REST API. Returns the diff text or 
None on failure."""
-    import requests
+@dataclass
+class TriageStats:
+    """Mutable counters for triage actions taken during auto-triage."""
 
-    url = f"https://api.github.com/repos/{github_repository}/pulls/{pr_number}";
-    response = requests.get(
-        url,
-        headers={"Authorization": f"Bearer {token}", "Accept": 
"application/vnd.github.v3.diff"},
-        timeout=60,
-    )
-    if response.status_code != 200:
-        return None
-    return response.text
+    total_converted: int = 0
+    total_commented: int = 0
+    total_closed: int = 0
+    total_ready: int = 0
+    total_skipped_action: int = 0
+    total_workflows_approved: int = 0
+    quit_early: bool = False
 
 
-def _fetch_author_open_prs(token: str, github_repository: str, author_login: 
str) -> list[dict]:
-    """Fetch all open PRs by a given author. Returns list of dicts with 
number, url, title, node_id."""
-    search_query = f"repo:{github_repository} type:pr is:open 
author:{author_login}"
-    data = _graphql_request(token, _SEARCH_PRS_QUERY, {"query": search_query, 
"first": 100})
-    results = []
-    for node in data["search"]["nodes"]:
-        if not node:
-            continue
-        results.append(
-            {
-                "number": node["number"],
-                "url": node["url"],
-                "title": node["title"],
-                "node_id": node["id"],
-            }
+@dataclass
+class TriageContext:
+    """Shared context passed to all triage review phases."""
+
+    token: str
+    github_repository: str
+    dry_run: bool
+    answer_triage: str | None
+    stats: TriageStats
+    author_flagged_count: dict[str, int]
+    # LLM background state
+    llm_future_to_pr: dict
+    llm_assessments: dict
+    llm_completed: list
+    llm_errors: list[int]
+    llm_passing: list
+
+    def collect_llm_progress(self) -> None:
+        """Collect completed LLM results and print progress status."""
+        if not self.llm_future_to_pr:
+            return
+        _collect_llm_results(
+            self.llm_future_to_pr,
+            self.llm_assessments,
+            self.llm_completed,
+            self.llm_errors,
+            self.llm_passing,
         )
-    return results
+        progress = _llm_progress_status(
+            len(self.llm_completed),
+            len(self.llm_future_to_pr),
+            len(self.llm_assessments),
+            len(self.llm_errors),
+        )
+        if progress:
+            get_console().print(progress)
+
+
+def _execute_triage_action(
+    ctx: TriageContext,
+    pr: PRData,
+    action: TriageAction,
+    *,
+    draft_comment: str,
+    close_comment: str,
+    comment_only_text: str | None = None,
+) -> None:
+    """Execute a single triage action on a PR. Mutates ctx.stats."""
+    stats = ctx.stats
 
+    if action == TriageAction.SKIP:
+        get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no action 
taken.[/]")
+        stats.total_skipped_action += 1
+        return
 
-def _close_suspicious_prs(
-    token: str,
-    github_repository: str,
-    author_prs: list[dict],
-    flagged_pr_number: int,
-) -> tuple[int, int]:
-    """Close PRs flagged as suspicious, add label and comment. Returns 
(closed, commented) counts."""
-    closed = 0
-    commented = 0
-    for pr_info in author_prs:
-        pr_num = pr_info["number"]
-        node_id = pr_info["node_id"]
+    if action == TriageAction.READY:
+        get_console().print(
+            f"  [info]Marking PR {_pr_link(pr)} as ready — adding 
'{_READY_FOR_REVIEW_LABEL}' label.[/]"
+        )
+        if _add_label(ctx.token, ctx.github_repository, pr.node_id, 
_READY_FOR_REVIEW_LABEL):
+            get_console().print(
+                f"  [success]Label '{_READY_FOR_REVIEW_LABEL}' added to PR 
{_pr_link(pr)}.[/]"
+            )
+            stats.total_ready += 1
+        else:
+            get_console().print(f"  [warning]Failed to add label to PR 
{_pr_link(pr)}.[/]")
+        return
 
-        if _close_pr(token, node_id):
-            get_console().print(f"  [success]PR 
[link={pr_info['url']}]#{pr_num}[/link] closed.[/]")
-            closed += 1
+    if action == TriageAction.COMMENT:
+        text = comment_only_text or draft_comment
+        get_console().print(f"  Posting comment on PR {_pr_link(pr)}...")
+        if _post_comment(ctx.token, pr.node_id, text):
+            get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
+            stats.total_commented += 1
         else:
-            get_console().print(f"  [error]Failed to close PR 
[link={pr_info['url']}]#{pr_num}[/link].[/]")
-            continue
+            get_console().print(f"  [error]Failed to post comment on PR 
{_pr_link(pr)}.[/]")
+        return
 
-        _add_label(token, github_repository, node_id, 
_SUSPICIOUS_CHANGES_LABEL)
+    if action == TriageAction.DRAFT:
+        get_console().print(f"  Converting PR {_pr_link(pr)} to draft...")
+        if _convert_pr_to_draft(ctx.token, pr.node_id):
+            get_console().print(f"  [success]PR {_pr_link(pr)} converted to 
draft.[/]")
+        else:
+            get_console().print(f"  [error]Failed to convert PR {_pr_link(pr)} 
to draft.[/]")
+            return
+        get_console().print(f"  Posting comment on PR {_pr_link(pr)}...")
+        if _post_comment(ctx.token, pr.node_id, draft_comment):
+            get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
+            stats.total_converted += 1
+        else:
+            get_console().print(f"  [error]Failed to post comment on PR 
{_pr_link(pr)}.[/]")
+        return
 
-        comment = (
-            f"This PR has been **closed** because suspicious changes were 
detected "
-            f"in [PR #{flagged_pr_number}]"
-            
f"(https://github.com/{github_repository}/pull/{flagged_pr_number}) "
-            f"by the same author.\n\n"
-            "All open PRs by this author have been closed as a precaution. "
-            "If you believe this was done in error, please reach out on the "
-            "[Airflow Slack](https://s.apache.org/airflow-slack)."
-        )
-        if _post_comment(token, node_id, comment):
-            commented += 1
+    if action == TriageAction.CLOSE:
+        get_console().print(f"  Closing PR {_pr_link(pr)}...")
+        if _close_pr(ctx.token, pr.node_id):
+            get_console().print(f"  [success]PR {_pr_link(pr)} closed.[/]")
+        else:
+            get_console().print(f"  [error]Failed to close PR 
{_pr_link(pr)}.[/]")
+            return
+        if _add_label(ctx.token, ctx.github_repository, pr.node_id, 
_CLOSED_QUALITY_LABEL):
+            get_console().print(f"  [success]Label '{_CLOSED_QUALITY_LABEL}' 
added to PR {_pr_link(pr)}.[/]")
+        else:
+            get_console().print(f"  [warning]Failed to add label to PR 
{_pr_link(pr)}.[/]")
+        get_console().print(f"  Posting comment on PR {_pr_link(pr)}...")
+        if _post_comment(ctx.token, pr.node_id, close_comment):
+            get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
+            stats.total_closed += 1
+        else:
+            get_console().print(f"  [error]Failed to post comment on PR 
{_pr_link(pr)}.[/]")
 
-    return closed, commented
 
+def _prompt_and_execute_flagged_pr(
+    ctx: TriageContext,
+    pr: PRData,
+    assessment,
+    *,
+    comment_only_text: str | None = None,
+) -> None:
+    """Display a flagged PR panel, prompt user for action, and execute it. 
Mutates ctx.stats."""
+    author_profile = _fetch_author_profile(ctx.token, pr.author_login, 
ctx.github_repository)
 
-def _find_pending_workflow_runs(token: str, github_repository: str, head_sha: 
str) -> list[dict]:
-    """Find workflow runs awaiting approval for a given commit SHA."""
-    import requests
+    draft_comment = _build_comment(
+        pr.author_login, assessment.violations, pr.number, pr.commits_behind, 
pr.base_ref
+    )
+    close_comment = _build_close_comment(
+        pr.author_login,
+        assessment.violations,
+        pr.number,
+        ctx.author_flagged_count.get(pr.author_login, 0),
+    )
+    if comment_only_text is None:
+        comment_only_text = _build_comment(
+            pr.author_login,
+            assessment.violations,
+            pr.number,
+            pr.commits_behind,
+            pr.base_ref,
+            comment_only=True,
+        )
+    _display_pr_panel(pr, author_profile, assessment, draft_comment)
+
+    default_action, reason = _compute_default_action(pr, assessment, 
ctx.author_flagged_count)
+    if default_action == TriageAction.CLOSE:
+        get_console().print(Panel(close_comment, title="Proposed close 
comment", border_style="red"))
+    get_console().print(f"  [bold]{reason}[/]")
+
+    if ctx.dry_run:
+        action_label = {
+            TriageAction.DRAFT: "draft",
+            TriageAction.COMMENT: "add comment",
+            TriageAction.CLOSE: "close",
+            TriageAction.READY: "ready",
+            TriageAction.SKIP: "skip",
+        }.get(default_action, str(default_action))
+        get_console().print(f"[warning]Dry run — would default to: 
{action_label}[/]")
+        return
 
-    url = f"https://api.github.com/repos/{github_repository}/actions/runs";
-    response = requests.get(
-        url,
-        params={"head_sha": head_sha, "status": "action_required", "per_page": 
"50"},
-        headers={"Authorization": f"Bearer {token}", "Accept": 
"application/vnd.github+json"},
-        timeout=30,
+    action = prompt_triage_action(
+        f"Action for PR {_pr_link(pr)}?",
+        default=default_action,
+        forced_answer=ctx.answer_triage,
     )
-    if response.status_code != 200:
-        return []
-    return response.json().get("workflow_runs", [])
 
+    if action == TriageAction.QUIT:
+        get_console().print("[warning]Quitting.[/]")
+        ctx.stats.quit_early = True
+        return
 
-def _approve_workflow_runs(token: str, github_repository: str, pending_runs: 
list[dict]) -> int:
-    """Approve pending workflow runs. Returns number successfully approved."""
-    import requests
+    _execute_triage_action(
+        ctx,
+        pr,
+        action,
+        draft_comment=draft_comment,
+        close_comment=close_comment,
+        comment_only_text=comment_only_text,
+    )
 
-    approved = 0
-    for run in pending_runs:
-        run_id = run["id"]
-        url = 
f"https://api.github.com/repos/{github_repository}/actions/runs/{run_id}/approve";
-        response = requests.post(
-            url,
-            headers={"Authorization": f"Bearer {token}", "Accept": 
"application/vnd.github+json"},
-            timeout=30,
+
+def _display_pr_overview_table(all_prs: list[PRData]) -> None:
+    """Display a Rich table overview of non-collaborator PRs."""
+    non_collab_prs = [pr for pr in all_prs if pr.author_association not in 
_COLLABORATOR_ASSOCIATIONS]
+    collab_count = len(all_prs) - len(non_collab_prs)
+    pr_table = Table(title=f"Fetched PRs ({len(non_collab_prs)} 
non-collaborator)")
+    pr_table.add_column("PR", style="cyan", no_wrap=True)
+    pr_table.add_column("Title", max_width=50)
+    pr_table.add_column("Author")
+    pr_table.add_column("Status")
+    pr_table.add_column("Behind", justify="right")
+    pr_table.add_column("Conflicts")
+    pr_table.add_column("CI Status")
+    for pr in non_collab_prs:
+        if pr.checks_state == "FAILURE":
+            ci_status = "[red]Failing[/]"
+        elif pr.checks_state == "PENDING":
+            ci_status = "[yellow]Pending[/]"
+        elif pr.checks_state == "UNKNOWN":
+            ci_status = "[dim]No checks[/]"
+        else:
+            ci_status = f"[green]{pr.checks_state.capitalize()}[/]"
+        behind_text = f"[yellow]{pr.commits_behind}[/]" if pr.commits_behind > 
0 else "[green]0[/]"
+        if pr.mergeable == "CONFLICTING":
+            conflicts_text = "[red]Yes[/]"
+        elif pr.mergeable == "UNKNOWN":
+            conflicts_text = "[dim]?[/]"
+        else:
+            conflicts_text = "[green]No[/]"
+
+        has_issues = pr.checks_state == "FAILURE" or pr.mergeable == 
"CONFLICTING"
+        overall = "[red]Flag[/]" if has_issues else "[green]OK[/]"
+
+        pr_table.add_row(
+            _pr_link(pr), pr.title[:50], pr.author_login, overall, 
behind_text, conflicts_text, ci_status
         )
-        if response.status_code in (201, 204):
-            approved += 1
+    get_console().print(pr_table)
+    if collab_count:
+        get_console().print(
+            f"  [dim]({collab_count} collaborator/member {'PRs' if 
collab_count != 1 else 'PR'} not shown)[/]"
+        )
+    get_console().print()
+
+
+def _filter_candidate_prs(
+    all_prs: list[PRData],
+    *,
+    include_collaborators: bool,
+    checks_state: str,
+    min_commits_behind: int,
+    max_num: int,
+) -> tuple[list[PRData], int, int, int]:
+    """Filter PRs to candidates. Returns (candidates, skipped_collaborator, 
skipped_bot, skipped_accepted)."""
+    candidate_prs: list[PRData] = []
+    total_skipped_collaborator = 0
+    total_skipped_bot = 0
+    total_skipped_accepted = 0
+    total_skipped_checks_state = 0
+    total_skipped_commits_behind = 0
+    verbose = get_verbose()
+    for pr in all_prs:
+        if not include_collaborators and pr.author_association in 
_COLLABORATOR_ASSOCIATIONS:
+            total_skipped_collaborator += 1
+            if verbose:
+                get_console().print(
+                    f"  [dim]Skipping PR {_pr_link(pr)} by "
+                    f"{pr.author_association.lower()} {pr.author_login}[/]"
+                )
+        elif _is_bot_account(pr.author_login):
+            total_skipped_bot += 1
+            if verbose:
+                get_console().print(f"  [dim]Skipping PR {_pr_link(pr)} — bot 
account {pr.author_login}[/]")
+        elif _READY_FOR_REVIEW_LABEL in pr.labels:
+            total_skipped_accepted += 1
+            if verbose:
+                get_console().print(
+                    f"  [dim]Skipping PR {_pr_link(pr)} — already has 
'{_READY_FOR_REVIEW_LABEL}' label[/]"
+                )
+        elif checks_state != "any" and pr.checks_state.lower() != checks_state:
+            total_skipped_checks_state += 1
+            if verbose:
+                get_console().print(
+                    f"  [dim]Skipping PR {_pr_link(pr)} — checks state 
{pr.checks_state} != {checks_state}[/]"
+                )
+        elif min_commits_behind > 0 and pr.commits_behind < min_commits_behind:
+            total_skipped_commits_behind += 1
+            if verbose:
+                get_console().print(
+                    f"  [dim]Skipping PR {_pr_link(pr)} — only "
+                    f"{pr.commits_behind} commits behind (min: 
{min_commits_behind})[/]"
+                )
         else:
+            candidate_prs.append(pr)
+
+    if max_num and len(candidate_prs) > max_num:
+        candidate_prs = candidate_prs[:max_num]
+
+    skipped_parts: list[str] = []
+    if total_skipped_collaborator:
+        skipped_parts.append(
+            f"{total_skipped_collaborator} "
+            f"{'collaborators' if total_skipped_collaborator != 1 else 
'collaborator'}"
+        )
+    if total_skipped_bot:
+        skipped_parts.append(f"{total_skipped_bot} {'bots' if 
total_skipped_bot != 1 else 'bot'}")
+    if total_skipped_accepted:
+        skipped_parts.append(f"{total_skipped_accepted} ready-for-review")
+    if total_skipped_checks_state:
+        skipped_parts.append(f"{total_skipped_checks_state} checks-state 
mismatch")
+    if total_skipped_commits_behind:
+        skipped_parts.append(f"{total_skipped_commits_behind} below 
min-commits-behind")
+    skipped_text = f"skipped {', '.join(skipped_parts)}, " if skipped_parts 
else ""
+    get_console().print(
+        f"\n[info]Fetched {len(all_prs)} {'PRs' if len(all_prs) != 1 else 
'PR'}, "
+        f"{skipped_text}"
+        f"assessing {len(candidate_prs)} {'PRs' if len(candidate_prs) != 1 
else 'PR'}"
+        f"{f' (capped at {max_num})' if max_num else ''}...[/]\n"
+    )
+    return candidate_prs, total_skipped_collaborator, total_skipped_bot, 
total_skipped_accepted
+
+
+def _enrich_candidate_details(
+    token: str, github_repository: str, candidate_prs: list[PRData], *, 
run_ci: bool
+) -> None:
+    """Fetch check details, resolve unknown mergeable status, and fetch review 
comments."""
+    if not candidate_prs:
+        return
+
+    get_console().print(
+        f"[info]Fetching check details for {len(candidate_prs)} "
+        f"candidate {'PRs' if len(candidate_prs) != 1 else 'PR'}...[/]"
+    )
+    _fetch_check_details_batch(token, github_repository, candidate_prs)
+
+    for pr in candidate_prs:
+        if pr.checks_state == "FAILURE" and not pr.failed_checks and 
pr.head_sha:
             get_console().print(
-                f"  [warning]Failed to approve run {run.get('name', run_id)}: 
{response.status_code}[/]"
+                f"  [dim]Fetching full check details for PR {_pr_link(pr)} "
+                f"(failures beyond first 100 checks)...[/]"
             )
-    return approved
+            pr.failed_checks = _fetch_failed_checks(token, github_repository, 
pr.head_sha)
 
+    unknown_count = sum(1 for pr in candidate_prs if pr.mergeable == "UNKNOWN")
+    if unknown_count:
+        get_console().print(
+            f"[info]Resolving merge conflict status for {unknown_count} "
+            f"{'PRs' if unknown_count != 1 else 'PR'} with unknown 
status...[/]"
+        )
+        resolved = _resolve_unknown_mergeable(token, github_repository, 
candidate_prs)
+        remaining = unknown_count - resolved
+        if remaining:
+            get_console().print(
+                f"  [dim]{resolved} resolved, {remaining} still unknown "
+                f"(GitHub hasn't computed mergeability yet).[/]"
+            )
+        else:
+            get_console().print(f"  [dim]All {resolved} resolved.[/]")
 
-@pr_group.command(
-    name="auto-triage",
-    help="Find open PRs from non-collaborators that don't meet quality 
criteria and convert to draft.",
-)
-@option_github_token
-@option_github_repository
-# --- Target selection ---
[email protected](
-    "--pr",
-    "pr_number",
-    type=int,
-    default=None,
+    if run_ci:
+        get_console().print(
+            f"[info]Fetching review thread details for {len(candidate_prs)} "
+            f"candidate {'PRs' if len(candidate_prs) != 1 else 'PR'}...[/]"
+        )
+        _fetch_unresolved_comments_batch(token, github_repository, 
candidate_prs)
+
+
+def _review_workflow_approval_prs(ctx: TriageContext, pending_approval: 
list[PRData]) -> None:
+    """Present NOT_RUN PRs for workflow approval. Mutates ctx.stats."""
+    if ctx.stats.quit_early or not pending_approval:
+        return
+
+    pending_approval.sort(key=lambda p: (p.author_login.lower(), p.number))
+    get_console().print(
+        f"\n[info]{len(pending_approval)} {'PRs have' if len(pending_approval) 
!= 1 else 'PR has'} "
+        f"no test workflows run — review and approve workflow runs"
+        f"{' (LLM assessments running in background)' if ctx.llm_future_to_pr 
else ''}:[/]\n"
+    )
+    for pr in pending_approval:
+        ctx.collect_llm_progress()
+
+        author_profile = _fetch_author_profile(ctx.token, pr.author_login, 
ctx.github_repository)
+        pending_runs = _find_pending_workflow_runs(ctx.token, 
ctx.github_repository, pr.head_sha)
+        _display_workflow_approval_panel(pr, author_profile, pending_runs)
+
+        # If author exceeds the close threshold, suggest closing instead of 
approving
+        author_count = ctx.author_flagged_count.get(pr.author_login, 0)
+        if author_count > 3:
+            get_console().print(
+                f"  [bold red]Author {pr.author_login} has {author_count} 
flagged "
+                f"{'PRs' if author_count != 1 else 'PR'} "
+                f"— suggesting close instead of workflow approval.[/]"
+            )
+            close_comment = _build_close_comment(pr.author_login, [], 
pr.number, author_count)
+            get_console().print(Panel(close_comment, title="Proposed close 
comment", border_style="red"))
+
+            if ctx.dry_run:
+                get_console().print("[warning]Dry run — would default to: 
close[/]")
+                continue
+
+            action = prompt_triage_action(
+                f"Action for PR {_pr_link(pr)}?",
+                default=TriageAction.CLOSE,
+                forced_answer=ctx.answer_triage,
+            )
+            if action == TriageAction.QUIT:
+                get_console().print("[warning]Quitting.[/]")
+                ctx.stats.quit_early = True
+                return
+            if action == TriageAction.SKIP:
+                get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no 
action taken.[/]")
+                continue
+            if action == TriageAction.CLOSE:
+                _execute_triage_action(
+                    ctx, pr, TriageAction.CLOSE, draft_comment="", 
close_comment=close_comment
+                )
+                continue
+            # For DRAFT or READY, fall through to normal workflow approval
+
+        if ctx.dry_run:
+            get_console().print("[warning]Dry run — skipping workflow 
approval.[/]")
+            continue
+
+        if not pending_runs:
+            get_console().print(
+                f"  [dim]No pending workflow runs found for PR {_pr_link(pr)}. 
"
+                f"Workflows may need to be triggered manually.[/]"
+            )
+            continue
+
+        answer = user_confirm(
+            f"Review diff for PR {_pr_link(pr)} before approving workflows?",
+            forced_answer=ctx.answer_triage,
+        )
+        if answer == Answer.QUIT:
+            get_console().print("[warning]Quitting.[/]")
+            ctx.stats.quit_early = True
+            return
+        if answer == Answer.NO:
+            get_console().print(f"  [info]Skipping workflow approval for PR 
{_pr_link(pr)}.[/]")
+            continue
+
+        get_console().print(f"  Fetching diff for PR {_pr_link(pr)}...")
+        diff_text = _fetch_pr_diff(ctx.token, ctx.github_repository, pr.number)
+        if diff_text:
+            from rich.syntax import Syntax
+
+            get_console().print(
+                Panel(
+                    Syntax(diff_text, "diff", theme="monokai", word_wrap=True),
+                    title=f"Diff for PR {_pr_link(pr)}",
+                    border_style="bright_cyan",
+                )
+            )
+        else:
+            get_console().print(
+                f"  [warning]Could not fetch diff for PR {_pr_link(pr)}. "
+                f"Review manually at: {pr.url}/files[/]"
+            )
+
+        answer = user_confirm(
+            f"No suspicious changes found in PR {_pr_link(pr)}? "
+            f"Approve {len(pending_runs)} workflow {'runs' if 
len(pending_runs) != 1 else 'run'}?",
+            forced_answer=ctx.answer_triage,
+        )
+        if answer == Answer.QUIT:
+            get_console().print("[warning]Quitting.[/]")
+            ctx.stats.quit_early = True
+            return
+        if answer == Answer.NO:
+            get_console().print(
+                f"\n  [bold red]Suspicious changes detected in PR 
{_pr_link(pr)} by {pr.author_login}.[/]"
+            )
+            get_console().print(f"  Fetching all open PRs by 
{pr.author_login}...")
+            author_prs = _fetch_author_open_prs(ctx.token, 
ctx.github_repository, pr.author_login)
+            if not author_prs:
+                get_console().print(f"  [dim]No open PRs found for 
{pr.author_login}.[/]")
+                continue
+
+            get_console().print()
+            get_console().print(
+                f"  [bold red]The following {len(author_prs)} "
+                f"{'PRs' if len(author_prs) != 1 else 'PR'} by "
+                f"{pr.author_login} will be closed, labeled "
+                f"'{_SUSPICIOUS_CHANGES_LABEL}', and commented:[/]"
+            )
+            for pr_info in author_prs:
+                get_console().print(
+                    f"    - [link={pr_info['url']}]#{pr_info['number']}[/link] 
{pr_info['title']}"
+                )
+            get_console().print()
+
+            confirm = user_confirm(
+                f"Close all {len(author_prs)} {'PRs' if len(author_prs) != 1 
else 'PR'} "
+                f"by {pr.author_login} and label as suspicious?",
+                forced_answer=ctx.answer_triage,
+            )
+            if confirm == Answer.QUIT:
+                get_console().print("[warning]Quitting.[/]")
+                ctx.stats.quit_early = True
+                return
+            if confirm == Answer.NO:
+                get_console().print(f"  [info]Skipping — no PRs closed for 
{pr.author_login}.[/]")
+                continue
+
+            closed, commented = _close_suspicious_prs(ctx.token, 
ctx.github_repository, author_prs, pr.number)
+            get_console().print(
+                f"  [success]Closed {closed}/{len(author_prs)} "
+                f"{'PRs' if len(author_prs) != 1 else 'PR'}, commented on 
{commented}.[/]"
+            )
+            ctx.stats.total_closed += closed
+            continue
+
+        approved = _approve_workflow_runs(ctx.token, ctx.github_repository, 
pending_runs)
+        if approved:
+            get_console().print(
+                f"  [success]Approved {approved}/{len(pending_runs)} workflow "
+                f"{'runs' if len(pending_runs) != 1 else 'run'} for PR "
+                f"{_pr_link(pr)}.[/]"
+            )
+            ctx.stats.total_workflows_approved += 1
+        else:
+            get_console().print(f"  [error]Failed to approve workflow runs for 
PR {_pr_link(pr)}.[/]")
+
+
+def _review_deterministic_flagged_prs(
+    ctx: TriageContext, det_flagged_prs: list[tuple[PRData, PRAssessment]]
+) -> None:
+    """Present deterministically flagged PRs for interactive review. Mutates 
ctx.stats."""
+    if not det_flagged_prs:
+        return
+
+    get_console().print(
+        f"\n[info]Reviewing {len(det_flagged_prs)} deterministically flagged "
+        f"{'PRs' if len(det_flagged_prs) != 1 else 'PR'}"
+        f"{' (LLM assessments running in background)' if ctx.llm_future_to_pr 
else ''}...[/]\n"
+    )
+
+    current_author: str | None = None
+    for pr, assessment in det_flagged_prs:
+        if ctx.stats.quit_early:
+            break
+
+        ctx.collect_llm_progress()
+
+        if pr.author_login != current_author:
+            current_author = pr.author_login
+            count = ctx.author_flagged_count[current_author]
+            get_console().print()
+            get_console().rule(
+                f"[bold]Author: {current_author}[/] ({count} flagged PR{'s' if 
count != 1 else ''})",
+                style="cyan",
+            )
+
+        _prompt_and_execute_flagged_pr(ctx, pr, assessment)
+
+
+def _review_llm_flagged_prs(ctx: TriageContext, llm_candidates: list[PRData]) 
-> None:
+    """Present LLM-flagged PRs as they become ready, without blocking. Mutates 
ctx.stats."""
+    if ctx.stats.quit_early or not ctx.llm_future_to_pr:
+        return
+
+    _collect_llm_results(
+        ctx.llm_future_to_pr, ctx.llm_assessments, ctx.llm_completed, 
ctx.llm_errors, ctx.llm_passing
+    )
+
+    llm_presented: set[int] = set()
+
+    while not ctx.stats.quit_early:
+        new_flagged = [
+            (pr, ctx.llm_assessments[pr.number])
+            for pr in llm_candidates
+            if pr.number in ctx.llm_assessments and pr.number not in 
llm_presented
+        ]
+        new_flagged.sort(key=lambda pair: (pair[0].author_login.lower(), 
pair[0].number))
+
+        if new_flagged:
+            remaining = len(ctx.llm_future_to_pr) - len(ctx.llm_completed)
+            status_parts = 
[f"{len(ctx.llm_completed)}/{len(ctx.llm_future_to_pr)} done"]
+            if remaining:
+                status_parts.append(f"{remaining} still running")
+            get_console().print(
+                f"\n[info]{len(new_flagged)} new LLM-flagged "
+                f"{'PRs' if len(new_flagged) != 1 else 'PR'} ready for review "
+                f"({', '.join(status_parts)}):[/]\n"
+            )
+
+            for pr, assessment in new_flagged:
+                if ctx.stats.quit_early:
+                    break
+                llm_presented.add(pr.number)
+                ctx.author_flagged_count[pr.author_login] = (
+                    ctx.author_flagged_count.get(pr.author_login, 0) + 1
+                )
+
+                _prompt_and_execute_flagged_pr(ctx, pr, assessment)
+
+                # While user was deciding, more results may have arrived
+                _collect_llm_results(
+                    ctx.llm_future_to_pr,
+                    ctx.llm_assessments,
+                    ctx.llm_completed,
+                    ctx.llm_errors,
+                    ctx.llm_passing,
+                )
+
+        if len(ctx.llm_completed) >= len(ctx.llm_future_to_pr):
+            break
+
+        get_console().print(
+            f"[dim]Waiting for {len(ctx.llm_future_to_pr) - 
len(ctx.llm_completed)} "
+            f"remaining LLM "
+            f"{'assessments' if len(ctx.llm_future_to_pr) - 
len(ctx.llm_completed) != 1 else 'assessment'}"
+            f"...[/]"
+        )
+        time.sleep(2)
+        _collect_llm_results(
+            ctx.llm_future_to_pr, ctx.llm_assessments, ctx.llm_completed, 
ctx.llm_errors, ctx.llm_passing
+        )
+
+    get_console().print(
+        f"\n[info]LLM assessment complete: {len(ctx.llm_assessments)} flagged, 
"
+        f"{len(ctx.llm_passing)} passed, {len(ctx.llm_errors)} errors "
+        f"(out of {len(ctx.llm_future_to_pr)} assessed).[/]\n"
+    )
+
+
def _review_passing_prs(ctx: TriageContext, passing_prs: list[PRData]) -> None:
    """Present passing PRs for optional ready-for-review marking. Mutates ctx.stats.

    For each PR that passed all deterministic and LLM checks, shows the PR/author
    info panels and asks the operator whether to mark it ready (adds the
    ready-for-review label), skip it, or quit the whole triage run.

    :param ctx: shared triage context (token, repo, dry-run flag, forced answer, stats).
    :param passing_prs: PRs that passed all checks; sorted in place by author then number.
    """
    # Nothing to do if a previous phase already quit, or there is nothing to review.
    if ctx.stats.quit_early or not passing_prs:
        return

    # Group PRs by author (case-insensitive) so one author's PRs are reviewed together.
    passing_prs.sort(key=lambda p: (p.author_login.lower(), p.number))
    get_console().print(
        f"\n[info]{len(passing_prs)} {'PRs pass' if len(passing_prs) != 1 else 'PR passes'} "
        f"all checks — review to mark as ready:[/]\n"
    )
    for pr in passing_prs:
        author_profile = _fetch_author_profile(ctx.token, pr.author_login, ctx.github_repository)
        _display_pr_info_panels(pr, author_profile)

        # Dry-run: display only, never prompt or mutate anything on GitHub.
        if ctx.dry_run:
            get_console().print("[warning]Dry run — skipping.[/]")
            continue

        action = prompt_triage_action(
            f"Action for PR {_pr_link(pr)}?",
            default=TriageAction.SKIP,
            forced_answer=ctx.answer_triage,
        )

        if action == TriageAction.QUIT:
            # Signal all later phases to stop; stats were updated for work done so far.
            get_console().print("[warning]Quitting.[/]")
            ctx.stats.quit_early = True
            return

        if action == TriageAction.READY:
            get_console().print(
                f"  [info]Marking PR {_pr_link(pr)} as ready — adding '{_READY_FOR_REVIEW_LABEL}' label.[/]"
            )
            # Only count the PR as "ready" if the label mutation actually succeeded.
            if _add_label(ctx.token, ctx.github_repository, pr.node_id, _READY_FOR_REVIEW_LABEL):
                get_console().print(
                    f"  [success]Label '{_READY_FOR_REVIEW_LABEL}' added to PR {_pr_link(pr)}.[/]"
                )
                ctx.stats.total_ready += 1
            else:
                get_console().print(f"  [warning]Failed to add label to PR {_pr_link(pr)}.[/]")
        else:
            # Any other action (SKIP and non-ready choices) leaves the PR untouched.
            get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no action taken.[/]")
            ctx.stats.total_skipped_action += 1
+
+
def _display_triage_summary(
    all_prs: list[PRData],
    candidate_prs: list[PRData],
    passing_prs: list[PRData],
    pending_approval: list[PRData],
    stats: TriageStats,
    *,
    total_deterministic_flags: int,
    total_llm_flagged: int,
    total_llm_errors: int,
    total_skipped_collaborator: int,
    total_skipped_bot: int,
    total_skipped_accepted: int,
) -> None:
    """Print the final triage summary table.

    Emits a one-line "Assessment complete" headline followed by a Rich table of
    counters. The per-category skip breakdown rows are shown only in verbose mode.
    """
    total_flagged = total_deterministic_flags + total_llm_flagged
    verbose = get_verbose()

    get_console().print(
        f"\n[info]Assessment complete: {total_flagged} {'PRs' if total_flagged != 1 else 'PR'} "
        f"flagged ({total_deterministic_flags} CI/conflicts/comments, "
        f"{total_llm_flagged} LLM-flagged"
        f"{f', {total_llm_errors} LLM errors' if total_llm_errors else ''}"
        f"{f', {len(pending_approval)} awaiting workflow approval' if pending_approval else ''}"
        f").[/]\n"
    )

    total_skipped = total_skipped_collaborator + total_skipped_bot + total_skipped_accepted

    # Collect (metric, count) pairs in display order, then render them in one pass.
    rows: list[tuple[str, int]] = [("PRs fetched", len(all_prs))]
    if verbose:
        # Detailed breakdown of the filtered-out PRs, one row per skip reason.
        rows.append(("Collaborators skipped", total_skipped_collaborator))
        rows.append(("Bots skipped", total_skipped_bot))
        rows.append(("Ready-for-review skipped", total_skipped_accepted))
    rows.extend(
        [
            ("PRs skipped (filtered)", total_skipped),
            ("PRs assessed", len(candidate_prs)),
            ("Flagged by CI/conflicts/comments", total_deterministic_flags),
            ("Flagged by LLM", total_llm_flagged),
            ("LLM errors (skipped)", total_llm_errors),
            ("Total flagged", total_flagged),
            ("PRs passing all checks", len(passing_prs)),
            ("PRs converted to draft", stats.total_converted),
            ("PRs commented (not drafted)", stats.total_commented),
            ("PRs closed", stats.total_closed),
            ("PRs marked ready for review", stats.total_ready),
            ("PRs skipped (no action)", stats.total_skipped_action),
            ("Awaiting workflow approval", len(pending_approval)),
            ("PRs with workflows approved", stats.total_workflows_approved),
        ]
    )

    summary_table = Table(title="Summary")
    summary_table.add_column("Metric", style="bold")
    summary_table.add_column("Count", justify="right")
    for metric, count in rows:
        summary_table.add_row(metric, str(count))
    get_console().print(summary_table)
+
+
def _fetch_pr_diff(token: str, github_repository: str, pr_number: int) -> str | None:
    """Fetch the diff for a PR via GitHub REST API. Returns the diff text or None on failure."""
    import requests

    # The v3.diff media type makes GitHub return the raw unified diff as the body.
    response = requests.get(
        f"https://api.github.com/repos/{github_repository}/pulls/{pr_number}",
        headers={"Authorization": f"Bearer {token}", "Accept": "application/vnd.github.v3.diff"},
        timeout=60,
    )
    # Any non-200 (missing PR, auth problem, rate limit) is reported as None.
    return response.text if response.status_code == 200 else None
+
+
def _fetch_author_open_prs(token: str, github_repository: str, author_login: str) -> list[dict]:
    """Fetch all open PRs by a given author. Returns list of dicts with number, url, title, node_id."""
    search_query = f"repo:{github_repository} type:pr is:open author:{author_login}"
    data = _graphql_request(token, _SEARCH_PRS_QUERY, {"query": search_query, "first": 100})
    # GraphQL search can return null nodes (e.g. deleted items) — filter those out.
    return [
        {
            "number": node["number"],
            "url": node["url"],
            "title": node["title"],
            "node_id": node["id"],
        }
        for node in data["search"]["nodes"]
        if node
    ]
+
+
def _close_suspicious_prs(
    token: str,
    github_repository: str,
    author_prs: list[dict],
    flagged_pr_number: int,
) -> tuple[int, int]:
    """Close PRs flagged as suspicious, add label and comment. Returns (closed, commented) counts."""
    # The explanatory comment is identical for every PR of the author — build it once.
    comment_body = (
        f"This PR has been **closed** because suspicious changes were detected "
        f"in [PR #{flagged_pr_number}]"
        f"(https://github.com/{github_repository}/pull/{flagged_pr_number}) "
        f"by the same author.\n\n"
        "All open PRs by this author have been closed as a precaution. "
        "If you believe this was done in error, please reach out on the "
        "[Airflow Slack](https://s.apache.org/airflow-slack)."
    )

    closed_count = 0
    commented_count = 0
    for entry in author_prs:
        number = entry["number"]
        pr_node = entry["node_id"]

        # If the close mutation fails, skip labeling/commenting for this PR entirely.
        if not _close_pr(token, pr_node):
            get_console().print(f"  [error]Failed to close PR [link={entry['url']}]#{number}[/link].[/]")
            continue
        get_console().print(f"  [success]PR [link={entry['url']}]#{number}[/link] closed.[/]")
        closed_count += 1

        _add_label(token, github_repository, pr_node, _SUSPICIOUS_CHANGES_LABEL)
        if _post_comment(token, pr_node, comment_body):
            commented_count += 1

    return closed_count, commented_count
+
+
def _find_pending_workflow_runs(token: str, github_repository: str, head_sha: str) -> list[dict]:
    """Find workflow runs awaiting approval for a given commit SHA."""
    import requests

    # "action_required" selects runs blocked pending a maintainer's approval.
    response = requests.get(
        f"https://api.github.com/repos/{github_repository}/actions/runs",
        params={"head_sha": head_sha, "status": "action_required", "per_page": "50"},
        headers={"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"},
        timeout=30,
    )
    if response.status_code == 200:
        return response.json().get("workflow_runs", [])
    # Treat any API failure as "no pending runs found".
    return []
+
+
def _approve_workflow_runs(token: str, github_repository: str, pending_runs: list[dict]) -> int:
    """Approve pending workflow runs. Returns number successfully approved."""
    import requests

    # URL prefix and auth headers are the same for every run — build them once.
    runs_base_url = f"https://api.github.com/repos/{github_repository}/actions/runs"
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"}

    success_count = 0
    for run in pending_runs:
        run_id = run["id"]
        response = requests.post(f"{runs_base_url}/{run_id}/approve", headers=headers, timeout=30)
        # GitHub returns 201 (or 204) on a successful approval.
        if response.status_code in (201, 204):
            success_count += 1
            continue
        get_console().print(
            f"  [warning]Failed to approve run {run.get('name', run_id)}: {response.status_code}[/]"
        )
    return success_count
+
+
+@pr_group.command(
+    name="auto-triage",
+    help="Find open PRs from non-collaborators that don't meet quality 
criteria and convert to draft.",
+)
+@option_github_token
+@option_github_repository
+# --- Target selection ---
[email protected](
+    "--pr",
+    "pr_number",
+    type=int,
+    default=None,
     help="Triage a specific PR by number instead of searching.",
 )
 # --- Filter options ---
@@ -1617,229 +2336,81 @@ def auto_triage(
 
     # Resolve --review-requested: resolve to the authenticated user
     review_requested_user: str | None = None
-    if review_requested:
-        review_requested_user = _resolve_viewer_login(token)
-        get_console().print(f"[info]Filtering PRs with review requested for: 
{review_requested_user}[/]")
-
-    # Split labels into exact (for GitHub search) and wildcard (for 
client-side filtering)
-    from fnmatch import fnmatch
-
-    exact_labels = tuple(lbl for lbl in labels if "*" not in lbl and "?" not 
in lbl)
-    wildcard_labels = [lbl for lbl in labels if "*" in lbl or "?" in lbl]
-    exact_exclude_labels = tuple(lbl for lbl in exclude_labels if "*" not in 
lbl and "?" not in lbl)
-    wildcard_exclude_labels = [lbl for lbl in exclude_labels if "*" in lbl or 
"?" in lbl]
-
-    t_total_start = time.monotonic()
-
-    # Phase 1: Lightweight fetch of PRs via GraphQL (no check contexts — fast)
-    t_phase1_start = time.monotonic()
-    if pr_number:
-        get_console().print(f"[info]Fetching PR #{pr_number} via 
GraphQL...[/]")
-        all_prs = [_fetch_single_pr_graphql(token, github_repository, 
pr_number)]
-    else:
-        get_console().print("[info]Fetching PRs via GraphQL...[/]")
-        all_prs = _fetch_prs_graphql(
-            token,
-            github_repository,
-            labels=exact_labels,
-            exclude_labels=exact_exclude_labels,
-            filter_user=filter_user,
-            sort=sort,
-            batch_size=batch_size,
-            created_after=created_after,
-            created_before=created_before,
-            updated_after=updated_after,
-            updated_before=updated_before,
-            review_requested=review_requested_user,
-        )
-
-    # Apply wildcard label filters client-side
-    if wildcard_labels:
-        all_prs = [
-            pr for pr in all_prs if any(fnmatch(lbl, pat) for pat in 
wildcard_labels for lbl in pr.labels)
-        ]
-    if wildcard_exclude_labels:
-        all_prs = [
-            pr
-            for pr in all_prs
-            if not any(fnmatch(lbl, pat) for pat in wildcard_exclude_labels 
for lbl in pr.labels)
-        ]
-
-    # Resolve how far behind base branch each PR is (chunked GraphQL)
-    get_console().print("[info]Checking how far behind base branch each PR 
is...[/]")
-    behind_map = _fetch_commits_behind_batch(token, github_repository, all_prs)
-    for pr in all_prs:
-        pr.commits_behind = behind_map.get(pr.number, 0)
-
-    # Display fetched PRs overview (skip collaborator PRs — they'll be 
summarised below)
-    non_collab_prs = [pr for pr in all_prs if pr.author_association not in 
_COLLABORATOR_ASSOCIATIONS]
-    collab_count = len(all_prs) - len(non_collab_prs)
-    pr_table = Table(title=f"Fetched PRs ({len(non_collab_prs)} 
non-collaborator)")
-    pr_table.add_column("PR", style="cyan", no_wrap=True)
-    pr_table.add_column("Title", max_width=50)
-    pr_table.add_column("Author")
-    pr_table.add_column("Status")
-    pr_table.add_column("Behind", justify="right")
-    pr_table.add_column("Conflicts")
-    pr_table.add_column("CI Status")
-    for pr in non_collab_prs:
-        if pr.checks_state == "FAILURE":
-            ci_status = "[red]Failing[/]"
-        elif pr.checks_state == "PENDING":
-            ci_status = "[yellow]Pending[/]"
-        elif pr.checks_state == "UNKNOWN":
-            ci_status = "[dim]No checks[/]"
-        else:
-            ci_status = f"[green]{pr.checks_state.capitalize()}[/]"
-        if pr.commits_behind > 0:
-            behind_text = f"[yellow]{pr.commits_behind}[/]"
-        else:
-            behind_text = "[green]0[/]"
-        if pr.mergeable == "CONFLICTING":
-            conflicts_text = "[red]Yes[/]"
-        elif pr.mergeable == "UNKNOWN":
-            conflicts_text = "[dim]?[/]"
-        else:
-            conflicts_text = "[green]No[/]"
-
-        has_issues = pr.checks_state == "FAILURE" or pr.mergeable == 
"CONFLICTING"
-        overall = "[red]Flag[/]" if has_issues else "[green]OK[/]"
-
-        pr_table.add_row(
-            _pr_link(pr),
-            pr.title[:50],
-            pr.author_login,
-            overall,
-            behind_text,
-            conflicts_text,
-            ci_status,
-        )
-    get_console().print(pr_table)
-    if collab_count:
-        get_console().print(
-            f"  [dim]({collab_count} collaborator/member {'PRs' if 
collab_count != 1 else 'PR'} not shown)[/]"
-        )
-    get_console().print()
-
-    t_phase1_end = time.monotonic()
-
-    # Phase 2: Filter out collaborators, bots, and ready-for-review PRs, then 
apply post-fetch filters
-    candidate_prs: list[PRData] = []
-    total_skipped_collaborator = 0
-    total_skipped_bot = 0
-    total_skipped_accepted = 0
-    total_skipped_checks_state = 0
-    total_skipped_commits_behind = 0
-    verbose = get_verbose()
-    for pr in all_prs:
-        if not include_collaborators and pr.author_association in 
_COLLABORATOR_ASSOCIATIONS:
-            total_skipped_collaborator += 1
-            if verbose:
-                get_console().print(
-                    f"  [dim]Skipping PR {_pr_link(pr)} by "
-                    f"{pr.author_association.lower()} {pr.author_login}[/]"
-                )
-        elif _is_bot_account(pr.author_login):
-            total_skipped_bot += 1
-            if verbose:
-                get_console().print(f"  [dim]Skipping PR {_pr_link(pr)} — bot 
account {pr.author_login}[/]")
-        elif _READY_FOR_REVIEW_LABEL in pr.labels:
-            total_skipped_accepted += 1
-            if verbose:
-                get_console().print(
-                    f"  [dim]Skipping PR {_pr_link(pr)} — already has 
'{_READY_FOR_REVIEW_LABEL}' label[/]"
-                )
-        elif checks_state != "any" and pr.checks_state.lower() != checks_state:
-            total_skipped_checks_state += 1
-            if verbose:
-                get_console().print(
-                    f"  [dim]Skipping PR {_pr_link(pr)} — checks state 
{pr.checks_state} != {checks_state}[/]"
-                )
-        elif min_commits_behind > 0 and pr.commits_behind < min_commits_behind:
-            total_skipped_commits_behind += 1
-            if verbose:
-                get_console().print(
-                    f"  [dim]Skipping PR {_pr_link(pr)} — only "
-                    f"{pr.commits_behind} commits behind (min: 
{min_commits_behind})[/]"
-                )
-        else:
-            candidate_prs.append(pr)
-
-    if max_num and len(candidate_prs) > max_num:
-        candidate_prs = candidate_prs[:max_num]
-
-    skipped_parts: list[str] = []
-    if total_skipped_collaborator:
-        skipped_parts.append(
-            f"{total_skipped_collaborator} "
-            f"{'collaborators' if total_skipped_collaborator != 1 else 
'collaborator'}"
-        )
-    if total_skipped_bot:
-        skipped_parts.append(f"{total_skipped_bot} {'bots' if 
total_skipped_bot != 1 else 'bot'}")
-    if total_skipped_accepted:
-        skipped_parts.append(f"{total_skipped_accepted} ready-for-review")
-    if total_skipped_checks_state:
-        skipped_parts.append(f"{total_skipped_checks_state} checks-state 
mismatch")
-    if total_skipped_commits_behind:
-        skipped_parts.append(f"{total_skipped_commits_behind} below 
min-commits-behind")
-    skipped_text = f"skipped {', '.join(skipped_parts)}, " if skipped_parts 
else ""
-    get_console().print(
-        f"\n[info]Fetched {len(all_prs)} {'PRs' if len(all_prs) != 1 else 
'PR'}, "
-        f"{skipped_text}"
-        f"assessing {len(candidate_prs)} {'PRs' if len(candidate_prs) != 1 
else 'PR'}"
-        f"{f' (capped at {max_num})' if max_num else ''}...[/]\n"
-    )
+    if review_requested:
+        review_requested_user = _resolve_viewer_login(token)
+        get_console().print(f"[info]Filtering PRs with review requested for: 
{review_requested_user}[/]")
 
-    # Phase 2b: Fetch detailed check contexts only for candidate PRs (chunked 
to avoid timeouts)
-    t_phase2b_start = time.monotonic()
-    if candidate_prs:
-        get_console().print(
-            f"[info]Fetching check details for {len(candidate_prs)} "
-            f"candidate {'PRs' if len(candidate_prs) != 1 else 'PR'}...[/]"
-        )
-        _fetch_check_details_batch(token, github_repository, candidate_prs)
+    # Phase 1: Fetch PRs via GraphQL
+    from fnmatch import fnmatch
 
-        # For PRs with >100 checks where failures weren't found, paginate 
individually
-        for pr in candidate_prs:
-            if pr.checks_state == "FAILURE" and not pr.failed_checks and 
pr.head_sha:
-                get_console().print(
-                    f"  [dim]Fetching full check details for PR {_pr_link(pr)} 
"
-                    f"(failures beyond first 100 checks)...[/]"
-                )
-                pr.failed_checks = _fetch_failed_checks(token, 
github_repository, pr.head_sha)
+    exact_labels = tuple(lbl for lbl in labels if "*" not in lbl and "?" not 
in lbl)
+    wildcard_labels = [lbl for lbl in labels if "*" in lbl or "?" in lbl]
+    exact_exclude_labels = tuple(lbl for lbl in exclude_labels if "*" not in 
lbl and "?" not in lbl)
+    wildcard_exclude_labels = [lbl for lbl in exclude_labels if "*" in lbl or 
"?" in lbl]
 
-    # Phase 2b2: Resolve UNKNOWN mergeable status via REST API
-    unknown_count = sum(1 for pr in candidate_prs if pr.mergeable == "UNKNOWN")
-    if unknown_count:
-        get_console().print(
-            f"[info]Resolving merge conflict status for {unknown_count} "
-            f"{'PRs' if unknown_count != 1 else 'PR'} with unknown 
status...[/]"
+    t_total_start = time.monotonic()
+
+    # Phase 1: Lightweight fetch of PRs via GraphQL (no check contexts — fast)
+    t_phase1_start = time.monotonic()
+    if pr_number:
+        get_console().print(f"[info]Fetching PR #{pr_number} via 
GraphQL...[/]")
+        all_prs = [_fetch_single_pr_graphql(token, github_repository, 
pr_number)]
+    else:
+        get_console().print("[info]Fetching PRs via GraphQL...[/]")
+        all_prs = _fetch_prs_graphql(
+            token,
+            github_repository,
+            labels=exact_labels,
+            exclude_labels=exact_exclude_labels,
+            filter_user=filter_user,
+            sort=sort,
+            batch_size=batch_size,
+            created_after=created_after,
+            created_before=created_before,
+            updated_after=updated_after,
+            updated_before=updated_before,
+            review_requested=review_requested_user,
         )
-        resolved = _resolve_unknown_mergeable(token, github_repository, 
candidate_prs)
-        remaining = unknown_count - resolved
-        if remaining:
-            get_console().print(
-                f"  [dim]{resolved} resolved, {remaining} still unknown "
-                f"(GitHub hasn't computed mergeability yet).[/]"
-            )
-        else:
-            get_console().print(f"  [dim]All {resolved} resolved.[/]")
 
-    t_phase2b_end = time.monotonic()
+    # Apply wildcard label filters client-side
+    if wildcard_labels:
+        all_prs = [
+            pr for pr in all_prs if any(fnmatch(lbl, pat) for pat in 
wildcard_labels for lbl in pr.labels)
+        ]
+    if wildcard_exclude_labels:
+        all_prs = [
+            pr
+            for pr in all_prs
+            if not any(fnmatch(lbl, pat) for pat in wildcard_exclude_labels 
for lbl in pr.labels)
+        ]
 
-    # Phase 2c: Fetch unresolved review comment counts for candidate PRs
-    if candidate_prs and run_ci:
-        get_console().print(
-            f"[info]Fetching review thread details for {len(candidate_prs)} "
-            f"candidate {'PRs' if len(candidate_prs) != 1 else 'PR'}...[/]"
+    # Resolve how far behind base branch each PR is
+    get_console().print("[info]Checking how far behind base branch each PR 
is...[/]")
+    behind_map = _fetch_commits_behind_batch(token, github_repository, all_prs)
+    for pr in all_prs:
+        pr.commits_behind = behind_map.get(pr.number, 0)
+
+    # Display overview and filter candidates
+    _display_pr_overview_table(all_prs)
+
+    candidate_prs, total_skipped_collaborator, total_skipped_bot, 
total_skipped_accepted = (
+        _filter_candidate_prs(
+            all_prs,
+            include_collaborators=include_collaborators,
+            checks_state=checks_state,
+            min_commits_behind=min_commits_behind,
+            max_num=max_num,
         )
-        _fetch_unresolved_comments_batch(token, github_repository, 
candidate_prs)
+    )
 
-    t_phase2c_end = time.monotonic()
+    t_phase1_end = time.monotonic()
+
+    # Enrich candidate PRs with check details, mergeable status, and review 
comments
+    t_enrich_start = time.monotonic()
+    _enrich_candidate_details(token, github_repository, candidate_prs, 
run_ci=run_ci)
+    t_enrich_end = time.monotonic()
 
-    # Phase 3: Deterministic checks (CI failures + merge conflicts + 
unresolved comments),
-    # then LLM for the rest
-    # PRs with NOT_RUN checks are separated for workflow approval instead of 
LLM assessment.
+    # Phase 3: Deterministic checks + categorize PRs
     assessments: dict[int, PRAssessment] = {}
     llm_candidates: list[PRData] = []
     passing_prs: list[PRData] = []
@@ -1854,7 +2425,6 @@ def auto_triage(
             conflict_assessment = assess_pr_conflicts(pr.number, pr.mergeable, 
pr.base_ref, pr.commits_behind)
             comments_assessment = assess_pr_unresolved_comments(pr.number, 
pr.unresolved_review_comments)
 
-            # Merge violations from all deterministic checks
             if ci_assessment or conflict_assessment or comments_assessment:
                 total_deterministic_flags += 1
                 violations = []
@@ -1898,7 +2468,7 @@ def auto_triage(
             f"{'PRs' if len(pending_approval) != 1 else 'PR'} awaiting 
workflow approval.[/]\n"
         )
 
-    # Phase 4: Start LLM assessments in background (non-blocking) while we 
process deterministic flags
+    # Phase 4: Start LLM assessments in background (non-blocking)
     llm_future_to_pr: dict = {}
     llm_assessments: dict[int, PRAssessment] = {}
     llm_completed: list = []
@@ -1935,620 +2505,66 @@ def auto_triage(
             for pr in llm_candidates
         }
 
-    # Phase 4b: Present NOT_RUN PRs for workflow approval while LLM runs
-    total_workflows_approved = 0
-    total_converted = 0
-    total_commented = 0
-    total_closed = 0
-    total_ready = 0
-    total_skipped_action = 0
+    # Build shared triage context and stats
     pr_actions: dict[int, str] = {}  # PR number -> action taken by user
-
-    quit_early = False
-    # author_flagged_count is populated below after deterministic flagged PRs 
are built,
-    # but we need a preliminary version for the workflow approval phase
     from collections import Counter
 
     author_flagged_count: dict[str, int] = dict(
         Counter(pr.author_login for pr in candidate_prs if pr.number in 
assessments)
     )
+    stats = TriageStats()
+    ctx = TriageContext(
+        token=token,
+        github_repository=github_repository,
+        dry_run=dry_run,
+        answer_triage=answer_triage,
+        stats=stats,
+        author_flagged_count=author_flagged_count,
+        llm_future_to_pr=llm_future_to_pr,
+        llm_assessments=llm_assessments,
+        llm_completed=llm_completed,
+        llm_errors=llm_errors,
+        llm_passing=llm_passing,
+    )
 
-    if not quit_early and pending_approval:
-        pending_approval.sort(key=lambda p: (p.author_login.lower(), p.number))
-        get_console().print(
-            f"\n[info]{len(pending_approval)} {'PRs have' if 
len(pending_approval) != 1 else 'PR has'} "
-            f"no test workflows run — review and approve workflow runs"
-            f"{' (LLM assessments running in background)' if llm_future_to_pr 
else ''}:[/]\n"
-        )
-        for pr in pending_approval:
-            # Collect any completed LLM results and show progress
-            if llm_future_to_pr:
-                _collect_llm_results(
-                    llm_future_to_pr, llm_assessments, llm_completed, 
llm_errors, llm_passing
-                )
-                progress = _llm_progress_status(
-                    len(llm_completed),
-                    len(llm_future_to_pr),
-                    len(llm_assessments),
-                    len(llm_errors),
-                )
-                if progress:
-                    get_console().print(progress)
-
-            author_profile = _fetch_author_profile(token, pr.author_login, 
github_repository)
-            pending_runs = _find_pending_workflow_runs(token, 
github_repository, pr.head_sha)
-            _display_workflow_approval_panel(pr, author_profile, pending_runs)
-
-            # If author exceeds the close threshold, suggest closing instead 
of approving
-            author_count = author_flagged_count.get(pr.author_login, 0)
-            if author_count > 3:
-                get_console().print(
-                    f"  [bold red]Author {pr.author_login} has {author_count} 
flagged "
-                    f"{'PRs' if author_count != 1 else 'PR'} "
-                    f"— suggesting close instead of workflow approval.[/]"
-                )
-                close_comment = _build_close_comment(pr.author_login, [], 
pr.number, author_count)
-                get_console().print(Panel(close_comment, title="Proposed close 
comment", border_style="red"))
-
-                if dry_run:
-                    get_console().print("[warning]Dry run — would default to: 
close[/]")
-                    continue
-
-                action = prompt_triage_action(
-                    f"Action for PR {_pr_link(pr)}?",
-                    default=TriageAction.CLOSE,
-                    forced_answer=answer_triage,
-                )
-                if action == TriageAction.QUIT:
-                    get_console().print("[warning]Quitting.[/]")
-                    quit_early = True
-                    break
-                if action == TriageAction.SKIP:
-                    get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — 
no action taken.[/]")
-                    pr_actions[pr.number] = "skipped"
-                    continue
-                if action == TriageAction.CLOSE:
-                    get_console().print(f"  Closing PR {_pr_link(pr)}...")
-                    if _close_pr(token, pr.node_id):
-                        get_console().print(f"  [success]PR {_pr_link(pr)} 
closed.[/]")
-                    else:
-                        get_console().print(f"  [error]Failed to close PR 
{_pr_link(pr)}.[/]")
-                        continue
-                    if _add_label(token, github_repository, pr.node_id, 
_CLOSED_QUALITY_LABEL):
-                        get_console().print(
-                            f"  [success]Label '{_CLOSED_QUALITY_LABEL}' added 
to PR {_pr_link(pr)}.[/]"
-                        )
-                    else:
-                        get_console().print(f"  [warning]Failed to add label 
to PR {_pr_link(pr)}.[/]")
-                    get_console().print(f"  Posting comment on PR 
{_pr_link(pr)}...")
-                    if _post_comment(token, pr.node_id, close_comment):
-                        get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
-                        total_closed += 1
-                        pr_actions[pr.number] = "closed"
-                    else:
-                        get_console().print(f"  [error]Failed to post comment 
on PR {_pr_link(pr)}.[/]")
-                    continue
-                # For DRAFT or READY, fall through to normal workflow approval
-
-            if dry_run:
-                get_console().print("[warning]Dry run — skipping workflow 
approval.[/]")
-                continue
-
-            if not pending_runs:
-                get_console().print(
-                    f"  [dim]No pending workflow runs found for PR 
{_pr_link(pr)}. "
-                    f"Workflows may need to be triggered manually.[/]"
-                )
-                continue
-
-            answer = user_confirm(
-                f"Review diff for PR {_pr_link(pr)} before approving 
workflows?",
-                forced_answer=answer_triage,
-            )
-            if answer == Answer.QUIT:
-                get_console().print("[warning]Quitting.[/]")
-                quit_early = True
-                break
-            if answer == Answer.NO:
-                get_console().print(f"  [info]Skipping workflow approval for 
PR {_pr_link(pr)}.[/]")
-                continue
-
-            get_console().print(f"  Fetching diff for PR {_pr_link(pr)}...")
-            diff_text = _fetch_pr_diff(token, github_repository, pr.number)
-            if diff_text:
-                from rich.syntax import Syntax
-
-                get_console().print(
-                    Panel(
-                        Syntax(diff_text, "diff", theme="monokai", 
word_wrap=True),
-                        title=f"Diff for PR {_pr_link(pr)}",
-                        border_style="bright_cyan",
-                    )
-                )
-            else:
-                get_console().print(
-                    f"  [warning]Could not fetch diff for PR {_pr_link(pr)}. "
-                    f"Review manually at: {pr.url}/files[/]"
-                )
-
-            answer = user_confirm(
-                f"No suspicious changes found in PR {_pr_link(pr)}? "
-                f"Approve {len(pending_runs)} workflow {'runs' if 
len(pending_runs) != 1 else 'run'}?",
-                forced_answer=answer_triage,
-            )
-            if answer == Answer.QUIT:
-                get_console().print("[warning]Quitting.[/]")
-                quit_early = True
-                break
-            if answer == Answer.NO:
-                get_console().print(
-                    f"\n  [bold red]Suspicious changes detected in PR 
{_pr_link(pr)} by {pr.author_login}.[/]"
-                )
-                get_console().print(f"  Fetching all open PRs by 
{pr.author_login}...")
-                author_prs = _fetch_author_open_prs(token, github_repository, 
pr.author_login)
-                if not author_prs:
-                    get_console().print(f"  [dim]No open PRs found for 
{pr.author_login}.[/]")
-                    continue
-
-                get_console().print()
-                get_console().print(
-                    f"  [bold red]The following {len(author_prs)} "
-                    f"{'PRs' if len(author_prs) != 1 else 'PR'} by "
-                    f"{pr.author_login} will be closed, labeled "
-                    f"'{_SUSPICIOUS_CHANGES_LABEL}', and commented:[/]"
-                )
-                for pr_info in author_prs:
-                    get_console().print(
-                        f"    - 
[link={pr_info['url']}]#{pr_info['number']}[/link] {pr_info['title']}"
-                    )
-                get_console().print()
-
-                confirm = user_confirm(
-                    f"Close all {len(author_prs)} {'PRs' if len(author_prs) != 
1 else 'PR'} "
-                    f"by {pr.author_login} and label as suspicious?",
-                    forced_answer=answer_triage,
-                )
-                if confirm == Answer.QUIT:
-                    get_console().print("[warning]Quitting.[/]")
-                    quit_early = True
-                    break
-                if confirm == Answer.NO:
-                    get_console().print(f"  [info]Skipping — no PRs closed for 
{pr.author_login}.[/]")
-                    continue
-
-                closed, commented = _close_suspicious_prs(token, 
github_repository, author_prs, pr.number)
-                get_console().print(
-                    f"  [success]Closed {closed}/{len(author_prs)} "
-                    f"{'PRs' if len(author_prs) != 1 else 'PR'}, commented on 
{commented}.[/]"
-                )
-                total_closed += closed
-                pr_actions[pr.number] = "suspicious"
-                continue
-
-            approved = _approve_workflow_runs(token, github_repository, 
pending_runs)
-            if approved:
-                get_console().print(
-                    f"  [success]Approved {approved}/{len(pending_runs)} 
workflow "
-                    f"{'runs' if len(pending_runs) != 1 else 'run'} for PR "
-                    f"{_pr_link(pr)}.[/]"
-                )
-                total_workflows_approved += 1
-                pr_actions[pr.number] = "approved"
-            else:
-                get_console().print(f"  [error]Failed to approve workflow runs 
for PR {_pr_link(pr)}.[/]")
-
-    # Phase 5a: Present deterministically flagged PRs for interactive review
-    # LLM assessments continue running in background during this phase
+    # Phase 4b: Present NOT_RUN PRs for workflow approval (LLM runs in 
background)
+    _review_workflow_approval_prs(ctx, pending_approval)
 
-    # Build sorted list of deterministic-flagged PRs grouped by author
+    # Phase 5a: Present deterministically flagged PRs
     det_flagged_prs = [(pr, assessments[pr.number]) for pr in candidate_prs if 
pr.number in assessments]
     det_flagged_prs.sort(key=lambda pair: (pair[0].author_login.lower(), 
pair[0].number))
+    _review_deterministic_flagged_prs(ctx, det_flagged_prs)
 
-    if det_flagged_prs:
-        get_console().print(
-            f"\n[info]Reviewing {len(det_flagged_prs)} deterministically 
flagged "
-            f"{'PRs' if len(det_flagged_prs) != 1 else 'PR'}"
-            f"{' (LLM assessments running in background)' if llm_future_to_pr 
else ''}...[/]\n"
-        )
-
-    current_author: str | None = None
-    for pr, assessment in det_flagged_prs:
-        if pr.author_login != current_author:
-            current_author = pr.author_login
-            count = author_flagged_count[current_author]
-            get_console().print()
-            get_console().rule(
-                f"[bold]Author: {current_author}[/] ({count} flagged PR{'s' if 
count != 1 else ''})",
-                style="cyan",
-            )
-
-        # Collect any completed LLM results and show progress
-        if llm_future_to_pr:
-            _collect_llm_results(llm_future_to_pr, llm_assessments, 
llm_completed, llm_errors, llm_passing)
-            progress = _llm_progress_status(
-                len(llm_completed),
-                len(llm_future_to_pr),
-                len(llm_assessments),
-                len(llm_errors),
-            )
-            if progress:
-                get_console().print(progress)
-
-        # Fetch author profile for context (only for flagged PRs)
-        author_profile = _fetch_author_profile(token, pr.author_login, 
github_repository)
-
-        comment = _build_comment(
-            pr.author_login, assessment.violations, pr.number, 
pr.commits_behind, pr.base_ref
-        )
-        close_comment = _build_close_comment(
-            pr.author_login,
-            assessment.violations,
-            pr.number,
-            author_flagged_count.get(pr.author_login, 0),
-        )
-        _display_pr_panel(pr, author_profile, assessment, comment)
-
-        default_action, reason = _compute_default_action(pr, assessment, 
author_flagged_count)
-        if default_action == TriageAction.CLOSE:
-            get_console().print(Panel(close_comment, title="Proposed close 
comment", border_style="red"))
-        get_console().print(f"  [bold]{reason}[/]")
-
-        if dry_run:
-            action_label = {
-                TriageAction.DRAFT: "draft",
-                TriageAction.CLOSE: "close",
-                TriageAction.READY: "ready",
-                TriageAction.SKIP: "skip",
-            }[default_action]
-            get_console().print(f"[warning]Dry run — would default to: 
{action_label}[/]")
-            continue
-
-        action = prompt_triage_action(
-            f"Action for PR {_pr_link(pr)}?",
-            default=default_action,
-            forced_answer=answer_triage,
-        )
-
-        if action == TriageAction.QUIT:
-            get_console().print("[warning]Quitting.[/]")
-            quit_early = True
-            break
-
-        if action == TriageAction.SKIP:
-            get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no 
action taken.[/]")
-            total_skipped_action += 1
-            pr_actions[pr.number] = "skipped"
-            continue
-
-        if action == TriageAction.READY:
-            get_console().print(
-                f"  [info]Marking PR {_pr_link(pr)} as ready — adding 
'{_READY_FOR_REVIEW_LABEL}' label.[/]"
-            )
-            if _add_label(token, github_repository, pr.node_id, 
_READY_FOR_REVIEW_LABEL):
-                get_console().print(
-                    f"  [success]Label '{_READY_FOR_REVIEW_LABEL}' added to PR 
{_pr_link(pr)}.[/]"
-                )
-                total_ready += 1
-                pr_actions[pr.number] = "ready"
-            else:
-                get_console().print(f"  [warning]Failed to add label to PR 
{_pr_link(pr)}.[/]")
-            continue
-
-        if action == TriageAction.DRAFT:
-            get_console().print(f"  Converting PR {_pr_link(pr)} to draft...")
-            if _convert_pr_to_draft(token, pr.node_id):
-                get_console().print(f"  [success]PR {_pr_link(pr)} converted 
to draft.[/]")
-            else:
-                get_console().print(f"  [error]Failed to convert PR 
{_pr_link(pr)} to draft.[/]")
-                continue
-
-            get_console().print(f"  Posting comment on PR {_pr_link(pr)}...")
-            if _post_comment(token, pr.node_id, comment):
-                get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
-                total_converted += 1
-                pr_actions[pr.number] = "drafted"
-            else:
-                get_console().print(f"  [error]Failed to post comment on PR 
{_pr_link(pr)}.[/]")
-            continue
-
-        if action == TriageAction.CLOSE:
-            get_console().print(f"  Closing PR {_pr_link(pr)}...")
-            if _close_pr(token, pr.node_id):
-                get_console().print(f"  [success]PR {_pr_link(pr)} closed.[/]")
-            else:
-                get_console().print(f"  [error]Failed to close PR 
{_pr_link(pr)}.[/]")
-                continue
-
-            if _add_label(token, github_repository, pr.node_id, 
_CLOSED_QUALITY_LABEL):
-                get_console().print(
-                    f"  [success]Label '{_CLOSED_QUALITY_LABEL}' added to PR 
{_pr_link(pr)}.[/]"
-                )
-            else:
-                get_console().print(f"  [warning]Failed to add label to PR 
{_pr_link(pr)}.[/]")
-
-            get_console().print(f"  Posting comment on PR {_pr_link(pr)}...")
-            if _post_comment(token, pr.node_id, close_comment):
-                get_console().print(f"  [success]Comment posted on PR 
{_pr_link(pr)}.[/]")
-                total_closed += 1
-                pr_actions[pr.number] = "closed"
-            else:
-                get_console().print(f"  [error]Failed to post comment on PR 
{_pr_link(pr)}.[/]")
-
-    # Phase 5b: Present LLM-flagged PRs as they become ready (no blocking wait)
-    total_llm_errors = 0
-    llm_presented: set[int] = set()  # PR numbers already presented to user
-    if not quit_early and llm_future_to_pr:
-        # Collect whatever has completed so far
-        _collect_llm_results(llm_future_to_pr, llm_assessments, llm_completed, 
llm_errors, llm_passing)
-
-        # Loop: present flagged PRs as they arrive, keep collecting new results
-        while not quit_early:
-            # Find newly flagged PRs not yet presented
-            new_flagged = [
-                (pr, llm_assessments[pr.number])
-                for pr in llm_candidates
-                if pr.number in llm_assessments and pr.number not in 
llm_presented
-            ]
-            new_flagged.sort(key=lambda pair: (pair[0].author_login.lower(), 
pair[0].number))
-
-            if new_flagged:
-                remaining = len(llm_future_to_pr) - len(llm_completed)
-                status_parts = [f"{len(llm_completed)}/{len(llm_future_to_pr)} 
done"]
-                if remaining:
-                    status_parts.append(f"{remaining} still running")
-                get_console().print(
-                    f"\n[info]{len(new_flagged)} new LLM-flagged "
-                    f"{'PRs' if len(new_flagged) != 1 else 'PR'} ready for 
review "
-                    f"({', '.join(status_parts)}):[/]\n"
-                )
-
-                for pr, assessment in new_flagged:
-                    llm_presented.add(pr.number)
-                    author_flagged_count[pr.author_login] = 
author_flagged_count.get(pr.author_login, 0) + 1
-
-                    author_profile = _fetch_author_profile(token, 
pr.author_login, github_repository)
-
-                    comment = _build_comment(
-                        pr.author_login, assessment.violations, pr.number, 
pr.commits_behind, pr.base_ref
-                    )
-                    comment_only = _build_comment(
-                        pr.author_login,
-                        assessment.violations,
-                        pr.number,
-                        pr.commits_behind,
-                        pr.base_ref,
-                        comment_only=True,
-                    )
-                    close_comment = _build_close_comment(
-                        pr.author_login,
-                        assessment.violations,
-                        pr.number,
-                        author_flagged_count.get(pr.author_login, 0),
-                    )
-                    _display_pr_panel(pr, author_profile, assessment, comment)
-
-                    default_action, reason = _compute_default_action(pr, 
assessment, author_flagged_count)
-                    if default_action == TriageAction.CLOSE:
-                        get_console().print(
-                            Panel(close_comment, title="Proposed close 
comment", border_style="red")
-                        )
-                    get_console().print(f"  [bold]{reason}[/]")
-
-                    if dry_run:
-                        action_label = {
-                            TriageAction.DRAFT: "draft",
-                            TriageAction.COMMENT: "add comment",
-                            TriageAction.CLOSE: "close",
-                            TriageAction.READY: "ready",
-                            TriageAction.SKIP: "skip",
-                        }[default_action]
-                        get_console().print(f"[warning]Dry run — would default 
to: {action_label}[/]")
-                        # Collect more results while in dry-run
-                        _collect_llm_results(
-                            llm_future_to_pr, llm_assessments, llm_completed, 
llm_errors, llm_passing
-                        )
-                        continue
-
-                    action = prompt_triage_action(
-                        f"Action for PR {_pr_link(pr)}?",
-                        default=default_action,
-                        forced_answer=answer_triage,
-                    )
-
-                    # While user was deciding, more results may have arrived
-                    _collect_llm_results(
-                        llm_future_to_pr, llm_assessments, llm_completed, 
llm_errors, llm_passing
-                    )
-
-                    if action == TriageAction.QUIT:
-                        get_console().print("[warning]Quitting.[/]")
-                        quit_early = True
-                        break
-
-                    if action == TriageAction.SKIP:
-                        get_console().print(f"  [info]Skipping PR 
{_pr_link(pr)} — no action taken.[/]")
-                        total_skipped_action += 1
-                        continue
-
-                    if action == TriageAction.READY:
-                        get_console().print(
-                            f"  [info]Marking PR {_pr_link(pr)} as ready "
-                            f"— adding '{_READY_FOR_REVIEW_LABEL}' label.[/]"
-                        )
-                        if _add_label(token, github_repository, pr.node_id, 
_READY_FOR_REVIEW_LABEL):
-                            get_console().print(
-                                f"  [success]Label '{_READY_FOR_REVIEW_LABEL}' 
added to PR {_pr_link(pr)}.[/]"
-                            )
-                            total_ready += 1
-                        else:
-                            get_console().print(f"  [warning]Failed to add 
label to PR {_pr_link(pr)}.[/]")
-                        continue
-
-                    if action == TriageAction.COMMENT:
-                        get_console().print(f"  Posting comment on PR 
{_pr_link(pr)}...")
-                        if _post_comment(token, pr.node_id, comment_only):
-                            get_console().print(f"  [success]Comment posted on 
PR {_pr_link(pr)}.[/]")
-                            total_commented += 1
-                        else:
-                            get_console().print(f"  [error]Failed to post 
comment on PR {_pr_link(pr)}.[/]")
-                        continue
-
-                    if action == TriageAction.DRAFT:
-                        get_console().print(f"  Converting PR {_pr_link(pr)} 
to draft...")
-                        if _convert_pr_to_draft(token, pr.node_id):
-                            get_console().print(f"  [success]PR {_pr_link(pr)} 
converted to draft.[/]")
-                        else:
-                            get_console().print(f"  [error]Failed to convert 
PR {_pr_link(pr)} to draft.[/]")
-                            continue
-
-                        get_console().print(f"  Posting comment on PR 
{_pr_link(pr)}...")
-                        if _post_comment(token, pr.node_id, comment):
-                            get_console().print(f"  [success]Comment posted on 
PR {_pr_link(pr)}.[/]")
-                            total_converted += 1
-                        else:
-                            get_console().print(f"  [error]Failed to post 
comment on PR {_pr_link(pr)}.[/]")
-                        continue
-
-                    if action == TriageAction.CLOSE:
-                        get_console().print(f"  Closing PR {_pr_link(pr)}...")
-                        if _close_pr(token, pr.node_id):
-                            get_console().print(f"  [success]PR {_pr_link(pr)} 
closed.[/]")
-                        else:
-                            get_console().print(f"  [error]Failed to close PR 
{_pr_link(pr)}.[/]")
-                            continue
-
-                        if _add_label(token, github_repository, pr.node_id, 
_CLOSED_QUALITY_LABEL):
-                            get_console().print(
-                                f"  [success]Label '{_CLOSED_QUALITY_LABEL}' 
added to PR {_pr_link(pr)}.[/]"
-                            )
-                        else:
-                            get_console().print(f"  [warning]Failed to add 
label to PR {_pr_link(pr)}.[/]")
-
-                        get_console().print(f"  Posting comment on PR 
{_pr_link(pr)}...")
-                        if _post_comment(token, pr.node_id, close_comment):
-                            get_console().print(f"  [success]Comment posted on 
PR {_pr_link(pr)}.[/]")
-                            total_closed += 1
-                        else:
-                            get_console().print(f"  [error]Failed to post 
comment on PR {_pr_link(pr)}.[/]")
-
-            # Check if all futures are done
-            if len(llm_completed) >= len(llm_future_to_pr):
-                break
-
-            # Still pending — wait briefly for more results, then loop back
-            get_console().print(
-                f"[dim]Waiting for {len(llm_future_to_pr) - 
len(llm_completed)} "
-                f"remaining LLM {'assessments' if len(llm_future_to_pr) - 
len(llm_completed) != 1 else 'assessment'}...[/]"
-            )
-            time.sleep(2)
-            _collect_llm_results(llm_future_to_pr, llm_assessments, 
llm_completed, llm_errors, llm_passing)
-
-        total_llm_errors = len(llm_errors)
-        get_console().print(
-            f"\n[info]LLM assessment complete: {len(llm_assessments)} flagged, 
"
-            f"{len(llm_passing)} passed, {total_llm_errors} errors "
-            f"(out of {len(llm_future_to_pr)} assessed).[/]\n"
-        )
+    # Phase 5b: Present LLM-flagged PRs as they become ready (streaming)
+    _review_llm_flagged_prs(ctx, llm_candidates)
 
-        # Add LLM passing PRs to the passing list
-        passing_prs.extend(llm_passing)
+    # Add LLM passing PRs to the passing list
+    passing_prs.extend(llm_passing)
 
     # Phase 5c: Present passing PRs for optional ready-for-review marking
-    if not quit_early and passing_prs:
-        passing_prs.sort(key=lambda p: (p.author_login.lower(), p.number))
-        get_console().print(
-            f"\n[info]{len(passing_prs)} {'PRs pass' if len(passing_prs) != 1 
else 'PR passes'} "
-            f"all checks — review to mark as ready:[/]\n"
-        )
-        for pr in passing_prs:
-            author_profile = _fetch_author_profile(token, pr.author_login, 
github_repository)
-            _display_pr_info_panels(pr, author_profile)
-
-            if dry_run:
-                get_console().print("[warning]Dry run — skipping.[/]")
-                continue
-
-            action = prompt_triage_action(
-                f"Action for PR {_pr_link(pr)}?",
-                default=TriageAction.SKIP,
-                forced_answer=answer_triage,
-            )
-
-            if action == TriageAction.QUIT:
-                get_console().print("[warning]Quitting.[/]")
-                quit_early = True
-                break
-
-            if action == TriageAction.SKIP:
-                get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no 
action taken.[/]")
-                total_skipped_action += 1
-                pr_actions[pr.number] = "skipped"
-                continue
-
-            if action == TriageAction.READY:
-                get_console().print(
-                    f"  [info]Marking PR {_pr_link(pr)} as ready "
-                    f"— adding '{_READY_FOR_REVIEW_LABEL}' label.[/]"
-                )
-                if _add_label(token, github_repository, pr.node_id, 
_READY_FOR_REVIEW_LABEL):
-                    get_console().print(
-                        f"  [success]Label '{_READY_FOR_REVIEW_LABEL}' added 
to PR {_pr_link(pr)}.[/]"
-                    )
-                    total_ready += 1
-                    pr_actions[pr.number] = "ready"
-                else:
-                    get_console().print(f"  [warning]Failed to add label to PR 
{_pr_link(pr)}.[/]")
-            else:
-                get_console().print(f"  [info]Skipping PR {_pr_link(pr)} — no 
action taken.[/]")
-                total_skipped_action += 1
-                pr_actions[pr.number] = "skipped"
+    _review_passing_prs(ctx, passing_prs)
 
     # Shut down LLM executor if it was started
     if llm_executor is not None:
         llm_executor.shutdown(wait=False, cancel_futures=True)
 
-    # Combine flagged counts for summary
-    total_flagged = total_deterministic_flags + len(llm_assessments)
-    total_llm_flagged = len(llm_assessments)
-
-    get_console().print(
-        f"\n[info]Assessment complete: {total_flagged} {'PRs' if total_flagged 
!= 1 else 'PR'} "
-        f"flagged ({total_deterministic_flags} CI/conflicts/comments, "
-        f"{total_llm_flagged} LLM-flagged"
-        f"{f', {total_llm_errors} LLM errors' if total_llm_errors else ''}"
-        f"{f', {len(pending_approval)} awaiting workflow approval' if 
pending_approval else ''}"
-        f").[/]\n"
+    # Display summary
+    _display_triage_summary(
+        all_prs,
+        candidate_prs,
+        passing_prs,
+        pending_approval,
+        stats,
+        total_deterministic_flags=total_deterministic_flags,
+        total_llm_flagged=len(llm_assessments),
+        total_llm_errors=len(llm_errors),
+        total_skipped_collaborator=total_skipped_collaborator,
+        total_skipped_bot=total_skipped_bot,
+        total_skipped_accepted=total_skipped_accepted,
     )
 
-    # Summary
-    t_total_end = time.monotonic()
-    get_console().print()
-    summary_table = Table(title="Summary")
-    summary_table.add_column("Metric", style="bold")
-    summary_table.add_column("Count", justify="right")
-    total_skipped = total_skipped_collaborator + total_skipped_bot + 
total_skipped_accepted
-    summary_table.add_row("PRs fetched", str(len(all_prs)))
-    if verbose:
-        summary_table.add_row("Collaborators skipped", 
str(total_skipped_collaborator))
-        summary_table.add_row("Bots skipped", str(total_skipped_bot))
-        summary_table.add_row("Ready-for-review skipped", 
str(total_skipped_accepted))
-    summary_table.add_row("PRs skipped (filtered)", str(total_skipped))
-    summary_table.add_row("PRs assessed", str(len(candidate_prs)))
-    summary_table.add_row("Flagged by CI/conflicts/comments", 
str(total_deterministic_flags))
-    summary_table.add_row("Flagged by LLM", str(total_llm_flagged))
-    summary_table.add_row("LLM errors (skipped)", str(total_llm_errors))
-    summary_table.add_row("Total flagged", str(total_flagged))
-    summary_table.add_row("PRs passing all checks", str(len(passing_prs)))
-    summary_table.add_row("PRs converted to draft", str(total_converted))
-    summary_table.add_row("PRs commented (not drafted)", str(total_commented))
-    summary_table.add_row("PRs closed", str(total_closed))
-    summary_table.add_row("PRs marked ready for review", str(total_ready))
-    summary_table.add_row("PRs skipped (no action)", str(total_skipped_action))
-    summary_table.add_row("Awaiting workflow approval", 
str(len(pending_approval)))
-    summary_table.add_row("PRs with workflows approved", 
str(total_workflows_approved))
-    get_console().print(summary_table)
-
     # Timing summary
+    t_total_end = time.monotonic()
     get_console().print()
     timing_table = Table(title="Timing Summary")
     timing_table.add_column("Phase", style="bold")
@@ -2571,22 +2587,12 @@ def auto_triage(
         "[dim]—[/]",
     )
 
-    phase2b_total = t_phase2b_end - t_phase2b_start
-    timing_table.add_row(
-        "Fetch checks + resolve mergeability",
-        _fmt_duration(phase2b_total),
-        str(len(candidate_prs)),
-        _fmt_duration(phase2b_total / num_candidates),
-        "[dim]—[/]",
-        "[dim]—[/]",
-    )
-
-    phase2c_total = t_phase2c_end - t_phase2b_end
+    enrich_total = t_enrich_end - t_enrich_start
     timing_table.add_row(
-        "Fetch review comments",
-        _fmt_duration(phase2c_total),
+        "Enrich candidates (checks + mergeability + comments)",
+        _fmt_duration(enrich_total),
         str(len(candidate_prs)),
-        _fmt_duration(phase2c_total / num_candidates),
+        _fmt_duration(enrich_total / num_candidates),
         "[dim]—[/]",
         "[dim]—[/]",
     )
@@ -2606,7 +2612,7 @@ def auto_triage(
         timing_table.add_row("Deterministic triage", "[dim]—[/]", "0", 
"[dim]—[/]", "[dim]—[/]", "[dim]—[/]")
 
     llm_count = len(llm_completed)
-    llm_wall_time = t_total_end - t_phase2c_end  # LLM runs in background 
across all interactive phases
+    llm_wall_time = t_total_end - t_enrich_end  # LLM runs in background 
across all interactive phases
     if llm_count:
         timing_table.add_row(
             "LLM assessment (background)",
@@ -2623,7 +2629,7 @@ def auto_triage(
 
     timing_table.add_row(
         "Interactive review (overlaps LLM)",
-        _fmt_duration(t_total_end - t_phase2c_end),
+        _fmt_duration(t_total_end - t_enrich_end),
         "",
         "",
         "",
@@ -2644,7 +2650,7 @@ def auto_triage(
         pr_titles = {pr.number: pr.title for pr in candidate_prs}
         # Amortize batch fetch time evenly across candidate PRs
         num_candidates = len(candidate_prs) or 1
-        fetch_per_pr = (t_phase2b_end - t_phase2b_start + t_phase2c_end - 
t_phase2b_end) / num_candidates
+        fetch_per_pr = enrich_total / num_candidates
 
         action_styles = {
             "drafted": "[yellow]drafted[/]",


Reply via email to