diff --git a/README.md b/README.md index 8893bb7..53fe20f 100644 --- a/README.md +++ b/README.md @@ -64,15 +64,19 @@ npx skills add https://github.com/ShigureLab/gh-llm --skill github-conversation Read a PR's full timeline — metadata, comments, reviews, checks — with progressive expansion: ```bash -# Show first + last timeline pages with actionable hints +# Initial read: show first + last timeline pages with actionable hints gh-llm pr view 77900 --repo PaddlePaddle/Paddle gh llm pr view 77900 --repo PaddlePaddle/Paddle +# Later incremental read: reuse the previous frontmatter `fetched_at` +gh-llm pr view 77900 --repo PaddlePaddle/Paddle --after 2026-04-08T02:41:17Z + # Show selected regions only gh-llm pr view 77900 --repo PaddlePaddle/Paddle --show timeline,checks # Expand one hidden timeline page gh-llm pr timeline-expand 2 --pr 77900 --repo PaddlePaddle/Paddle +gh-llm pr timeline-expand 2 --pr 77900 --repo PaddlePaddle/Paddle --after 2026-04-08T02:41:17Z # Auto-expand folded content in default/timeline view gh-llm pr view 77900 --repo PaddlePaddle/Paddle --expand resolved,minimized @@ -118,12 +122,16 @@ Issue reading works the same way as PR reading — timeline view with progressiv ```bash gh-llm issue view 77924 --repo PaddlePaddle/Paddle +gh-llm issue view 77924 --repo PaddlePaddle/Paddle --after 2026-04-08T02:41:17Z gh-llm issue timeline-expand 2 --issue 77924 --repo PaddlePaddle/Paddle +gh-llm issue timeline-expand 2 --issue 77924 --repo PaddlePaddle/Paddle --after 2026-04-08T02:41:17Z gh-llm issue comment-expand IC_xxx --issue 77924 --repo PaddlePaddle/Paddle gh-llm issue view 77924 --repo PaddlePaddle/Paddle --expand minimized,details gh-llm issue view 77924 --repo PaddlePaddle/Paddle --show meta,description ``` +For incremental follow-ups, copy the previous output's `fetched_at` value into `--after <timestamp>`. `--before` is also available when you want to inspect only older timeline slices. 
+ When `--show` does not include `timeline` (for example `--show meta`, `--show summary`, or `--show actions`), both `pr view` and `issue view` stay on the lightweight metadata path and skip timeline bootstrap. Use `--show` to choose which output sections to render. Use `--expand` to automatically open folded content within those sections. @@ -309,6 +317,8 @@ This supports the normal flow where one review contains multiple inline comments All output follows consistent formatting rules so both humans and LLMs can parse it reliably: - **Metadata** is rendered as YAML-style frontmatter at the top of PR/issue views. +- Frontmatter includes `fetched_at`, so the next incremental read can use `--after <timestamp>`. +- When timeline filtering is active, frontmatter also includes `timeline_after` / `timeline_before` and filtered vs unfiltered event counts. - **Description** is wrapped in `...` tags. - **Comment bodies** use `...` tags to avoid markdown fence ambiguity with code blocks inside comments. - **Hidden timeline sections** are separated by `---` dividers and include ready-to-run expand commands to load the omitted content. diff --git a/skills/github-conversation/SKILL.md b/skills/github-conversation/SKILL.md index 8c80524..1990aad 100644 --- a/skills/github-conversation/SKILL.md +++ b/skills/github-conversation/SKILL.md @@ -109,11 +109,15 @@ Use this before forking or opening a PR when you need the default branch, onboar ```bash gh-llm pr view <number> --repo <owner/repo> +gh-llm pr view <number> --repo <owner/repo> --after <timestamp> gh-llm pr timeline-expand <page> --pr <number> --repo <owner/repo> +gh-llm pr timeline-expand <page> --pr <number> --repo <owner/repo> --after <timestamp> gh-llm pr review-expand <review-id> --pr <number> --repo <owner/repo> gh-llm pr checks --pr <number> --repo <owner/repo> ``` +Use plain `view` for the first pass. On follow-up reads, reuse the previous frontmatter `fetched_at` as `--after <timestamp>` for an incremental timeline refresh. 
+ ### Prepare a PR body ```bash @@ -131,10 +135,13 @@ Use this before `gh pr create` when you need to load a repo PR template, append ```bash gh-llm issue view <number> --repo <owner/repo> +gh-llm issue view <number> --repo <owner/repo> --after <timestamp> gh-llm issue timeline-expand <page> --issue <number> --repo <owner/repo> +gh-llm issue timeline-expand <page> --issue <number> --repo <owner/repo> --after <timestamp> ``` For lightweight inspection, prefer non-timeline `--show` combinations such as `--show meta`, `--show summary`, or `--show actions`; `gh-llm` keeps those paths on metadata-only loading unless `timeline` is explicitly requested. +Frontmatter includes `fetched_at`, plus `timeline_after` / `timeline_before` and filtered vs unfiltered counts when timeline filtering is active. ### Write simple updates diff --git a/src/gh_llm/commands/issue.py b/src/gh_llm/commands/issue.py index 3d7bf90..6185636 100644 --- a/src/gh_llm/commands/issue.py +++ b/src/gh_llm/commands/issue.py @@ -5,7 +5,9 @@ from gh_llm.commands.options import ( add_body_input_arguments, + add_timeline_window_arguments, maybe_resolve_subject, + parse_timeline_window, raise_unknown_option_value, resolve_file_or_inline_text, resolve_subject, @@ -65,6 +67,7 @@ def register_issue_parser(subparsers: Any) -> None: default=[], help="auto-expand folded content: minimized, details, all (comma-separated or repeatable)", ) + add_timeline_window_arguments(view_parser) view_parser.set_defaults(handler=cmd_issue_view) timeline_expand_parser = issue_subparsers.add_parser("timeline-expand", help="load one timeline page by number") @@ -78,6 +81,7 @@ def register_issue_parser(subparsers: Any) -> None: default=[], help="auto-expand folded content: minimized, details, all (comma-separated or repeatable)", ) + add_timeline_window_arguments(timeline_expand_parser) timeline_expand_parser.set_defaults(handler=cmd_issue_timeline_expand) details_expand_parser = issue_subparsers.add_parser( @@ -88,6 +92,7 @@ def register_issue_parser(subparsers: Any) -> None: details_expand_parser.add_argument("--issue", help="Issue number/url") 
details_expand_parser.add_argument("--repo", help="repository in OWNER/REPO format") details_expand_parser.add_argument("--page-size", type=int, help="timeline entries per page") + add_timeline_window_arguments(details_expand_parser) details_expand_parser.set_defaults(handler=cmd_issue_details_expand) comment_edit_parser = issue_subparsers.add_parser("comment-edit", help="edit one issue comment by node id") @@ -113,6 +118,7 @@ def cmd_issue_view(args: Any) -> int: page_size = int(args.page_size) expand = _parse_expand_options(raw_values=list(getattr(args, "expand", []))) show = _parse_show_options(raw_values=list(getattr(args, "show", []))) + timeline_window = _resolve_timeline_window(args) client = GitHubClient() pager = TimelinePager(client) @@ -126,6 +132,7 @@ def cmd_issue_view(args: Any) -> int: context, first_page, last_page = pager.build_initial( meta, page_size=page_size, + timeline_window=timeline_window, show_minimized_details=expand.minimized, show_details_blocks=expand.details, ) @@ -194,8 +201,14 @@ def print_block(lines: list[str]) -> None: def cmd_issue_timeline_expand(args: Any) -> int: client = GitHubClient() pager = TimelinePager(client) - context, meta = _resolve_context_and_meta(client=client, pager=pager, args=args) expand = _parse_expand_options(raw_values=list(getattr(args, "expand", []))) + context, meta = _resolve_context_and_meta( + client=client, + pager=pager, + args=args, + show_minimized_details=expand.minimized, + show_details_blocks=expand.details, + ) page = pager.fetch_page( meta=meta, @@ -217,13 +230,19 @@ def cmd_issue_timeline_expand(args: Any) -> int: def cmd_issue_details_expand(args: Any) -> int: client = GitHubClient() pager = TimelinePager(client) - context, meta = _resolve_context_and_meta(client=client, pager=pager, args=args) + context, meta = _resolve_context_and_meta( + client=client, + pager=pager, + args=args, + show_minimized_details=True, + show_details_blocks=True, + ) index = int(args.index) - if index < 1 or 
index > context.total_count: + page_number = _resolve_timeline_page_for_index(context=context, index=index) + if page_number is None: raise RuntimeError(f"invalid event index {index}, expected in 1..{context.total_count}") - page_number = ((index - 1) // context.page_size) + 1 page = pager.fetch_page( meta=meta, context=context, @@ -232,10 +251,12 @@ def cmd_issue_details_expand(args: Any) -> int: show_details_blocks=True, diff_hunk_lines=None, ) - page_start = (page_number - 1) * context.page_size + 1 - offset = index - page_start - if offset < 0 or offset >= len(page.items): - raise RuntimeError("event index is outside loaded page range") + try: + offset = page.absolute_indexes.index(index) + except ValueError: + raise RuntimeError("event index is outside loaded page range") from None + except AttributeError as error: # pragma: no cover - defensive fallback + raise RuntimeError("event index is outside loaded page range") from error for line in render_event_detail_blocks(index=index, event=page.items[offset]): print(line) @@ -282,15 +303,41 @@ def _resolve_optional_issue(*, client: GitHubClient, args: Any) -> PullRequestMe def _resolve_context_and_meta( - *, client: GitHubClient, pager: TimelinePager, args: Any + *, + client: GitHubClient, + pager: TimelinePager, + args: Any, + show_minimized_details: bool = False, + show_details_blocks: bool = False, ) -> tuple[TimelineContext, PullRequestMeta]: page_size = getattr(args, "page_size", None) effective_page_size = DEFAULT_PAGE_SIZE if page_size is None else int(page_size) meta = _resolve_issue_meta(client=client, args=args) - context, _, _ = pager.build_initial(meta=meta, page_size=effective_page_size) + context, _, _ = pager.build_initial( + meta=meta, + page_size=effective_page_size, + timeline_window=_resolve_timeline_window(args), + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + ) return context, meta +def _resolve_timeline_window(args: Any): + return 
parse_timeline_window(after=getattr(args, "after", None), before=getattr(args, "before", None)) + + +def _resolve_timeline_page_for_index(*, context: TimelineContext, index: int) -> int | None: + if context.timeline_filtered: + for page_number, page in context.filtered_pages.items(): + if index in page.absolute_indexes: + return page_number + return None + if index < 1 or index > context.total_count: + return None + return ((index - 1) // context.page_size) + 1 + + def _parse_expand_options(*, raw_values: list[str]) -> _ExpandOptions: minimized = False details = False diff --git a/src/gh_llm/commands/options.py b/src/gh_llm/commands/options.py index 3431b9c..fac1a23 100644 --- a/src/gh_llm/commands/options.py +++ b/src/gh_llm/commands/options.py @@ -1,10 +1,13 @@ from __future__ import annotations import sys +from datetime import UTC, datetime from difflib import get_close_matches from pathlib import Path from typing import TYPE_CHECKING, Any, NoReturn +from gh_llm.models import TimelineWindow + if TYPE_CHECKING: from collections.abc import Callable @@ -29,6 +32,40 @@ def add_body_input_arguments( ) +def add_timeline_window_arguments(parser: Any) -> None: + parser.add_argument( + "--after", + help="only include timeline events strictly after this ISO 8601 / RFC3339 timestamp", + ) + parser.add_argument( + "--before", + help="only include timeline events strictly before this ISO 8601 / RFC3339 timestamp", + ) + + +def parse_timeline_window(*, after: str | None, before: str | None) -> TimelineWindow: + after_value = _parse_timestamp(raw=after, flag="--after") + before_value = _parse_timestamp(raw=before, flag="--before") + if after_value is not None and before_value is not None and after_value >= before_value: + raise RuntimeError("invalid time range: `--after` must be earlier than `--before`") + return TimelineWindow( + after=after_value, + before=before_value, + after_text=(format_timestamp_utc(after_value) if after_value is not None else None), + 
before_text=(format_timestamp_utc(before_value) if before_value is not None else None), + ) + + +def format_timestamp_utc(value: datetime) -> str: + utc_value = value.astimezone(UTC) + timespec = "microseconds" if utc_value.microsecond else "seconds" + return utc_value.isoformat(timespec=timespec).replace("+00:00", "Z") + + +def current_timestamp_utc() -> str: + return format_timestamp_utc(datetime.now(UTC).replace(microsecond=0)) + + def read_text_from_path_or_stdin(path: str) -> str: if path == "-": return sys.stdin.read() @@ -94,3 +131,20 @@ def raise_unknown_option_value( suggest_text = f" Did you mean '{suggestion[0]}'?" if suggestion else "" valid_text = ", ".join(valid_values) raise RuntimeError(f"unknown {flag} option: {token}. Valid values: {valid_text}.{suggest_text}") + + +def _parse_timestamp(*, raw: str | None, flag: str) -> datetime | None: + if raw is None: + return None + normalized = raw.strip() + if not normalized: + raise RuntimeError(f"{flag} requires a timestamp value") + if normalized.endswith(("Z", "z")): + normalized = normalized[:-1] + "+00:00" + try: + value = datetime.fromisoformat(normalized) + except ValueError as error: + raise RuntimeError(f"invalid {flag} timestamp: {raw}") from error + if value.tzinfo is None or value.utcoffset() is None: + raise RuntimeError(f"invalid {flag} timestamp: {raw}") + return value.astimezone(UTC) diff --git a/src/gh_llm/commands/pr.py b/src/gh_llm/commands/pr.py index 35870aa..0370116 100644 --- a/src/gh_llm/commands/pr.py +++ b/src/gh_llm/commands/pr.py @@ -11,7 +11,9 @@ from gh_llm.commands.options import ( add_body_input_arguments, + add_timeline_window_arguments, maybe_resolve_subject, + parse_timeline_window, raise_unknown_option_value, resolve_file_or_inline_text, resolve_subject, @@ -96,6 +98,7 @@ def register_pr_parser(subparsers: Any) -> None: default=DEFAULT_DIFF_HUNK_LINES, help="max lines for each review diff hunk (<=0 means full)", ) + add_timeline_window_arguments(view_parser) 
view_parser.set_defaults(handler=cmd_pr_view) timeline_expand_parser = pr_subparsers.add_parser("timeline-expand", help="expand a specific timeline page") @@ -115,6 +118,7 @@ def register_pr_parser(subparsers: Any) -> None: default=DEFAULT_DIFF_HUNK_LINES, help="max lines for each review diff hunk (<=0 means full)", ) + add_timeline_window_arguments(timeline_expand_parser) timeline_expand_parser.set_defaults(handler=cmd_pr_timeline_expand) details_expand_parser = pr_subparsers.add_parser( @@ -125,6 +129,7 @@ def register_pr_parser(subparsers: Any) -> None: details_expand_parser.add_argument("--pr", help="PR number/url/branch") details_expand_parser.add_argument("--repo", help="repository in OWNER/REPO format") details_expand_parser.add_argument("--page-size", type=int, help="timeline entries per page") + add_timeline_window_arguments(details_expand_parser) details_expand_parser.set_defaults(handler=cmd_pr_details_expand) review_expand_parser = pr_subparsers.add_parser( @@ -376,6 +381,7 @@ def cmd_pr_view(args: Any) -> int: diff_hunk_lines = _resolve_diff_hunk_lines(args=args, default=DEFAULT_DIFF_HUNK_LINES) expand = _parse_expand_options(raw_values=list(getattr(args, "expand", []))) show = _parse_show_options(raw_values=list(getattr(args, "show", []))) + timeline_window = _resolve_timeline_window(args) client = GitHubClient() pager = TimelinePager(client) @@ -389,6 +395,7 @@ def cmd_pr_view(args: Any) -> int: context, first_page, last_page = pager.build_initial( meta, page_size=page_size, + timeline_window=timeline_window, show_resolved_details=expand.resolved, show_outdated_details=True, show_minimized_details=expand.minimized, @@ -483,9 +490,18 @@ def print_block(lines: list[str]) -> None: def cmd_pr_timeline_expand(args: Any) -> int: client = GitHubClient() pager = TimelinePager(client) - context, meta = _resolve_context_and_meta(client=client, pager=pager, args=args) diff_hunk_lines = _resolve_diff_hunk_lines(args=args, default=DEFAULT_DIFF_HUNK_LINES) expand 
= _parse_expand_options(raw_values=list(getattr(args, "expand", []))) + context, meta = _resolve_context_and_meta( + client=client, + pager=pager, + args=args, + show_resolved_details=expand.resolved, + show_outdated_details=True, + show_minimized_details=expand.minimized, + show_details_blocks=expand.details, + diff_hunk_lines=diff_hunk_lines, + ) page = pager.fetch_page( meta=meta, @@ -510,13 +526,22 @@ def cmd_pr_timeline_expand(args: Any) -> int: def cmd_pr_details_expand(args: Any) -> int: client = GitHubClient() pager = TimelinePager(client) - context, meta = _resolve_context_and_meta(client=client, pager=pager, args=args) + context, meta = _resolve_context_and_meta( + client=client, + pager=pager, + args=args, + show_resolved_details=True, + show_outdated_details=True, + show_minimized_details=True, + show_details_blocks=True, + diff_hunk_lines=None, + ) index = int(args.index) - if index < 1 or index > context.total_count: + page_number = _resolve_timeline_page_for_index(context=context, index=index) + if page_number is None: raise RuntimeError(f"invalid event index {index}, expected in 1..{context.total_count}") - page_number = ((index - 1) // context.page_size) + 1 page = pager.fetch_page( meta=meta, context=context, @@ -527,11 +552,10 @@ def cmd_pr_details_expand(args: Any) -> int: show_details_blocks=True, diff_hunk_lines=None, ) - - page_start = (page_number - 1) * context.page_size + 1 - offset = index - page_start - if offset < 0 or offset >= len(page.items): - raise RuntimeError("event index is outside loaded page range") + try: + offset = page.absolute_indexes.index(index) + except ValueError: + raise RuntimeError("event index is outside loaded page range") from None for line in render_event_detail_blocks(index=index, event=page.items[offset]): print(line) @@ -584,7 +608,7 @@ def cmd_pr_review_expand(args: Any) -> int: if not event.kind.startswith("review/"): continue if event.source_id in review_ids: - event_index = ((page_number - 1) * 
context.page_size) + offset + 1 + event_index = page.absolute_indexes[offset] matched[event.source_id] = (event_index, page) if len(matched) == len(review_ids): break @@ -598,8 +622,13 @@ def cmd_pr_review_expand(args: Any) -> int: continue event_index, page = item - page_start = (event_index - 1) // context.page_size * context.page_size + 1 - offset = event_index - page_start + try: + offset = page.absolute_indexes.index(event_index) + except ValueError: + print(f"## Review {review_id}") + print("(not found on this PR timeline)") + print() + continue event = page.items[offset] for line in render_event_detail(index=event_index, event=event): print(line) @@ -1125,20 +1154,47 @@ def cmd_pr_review_submit(args: Any) -> int: def _resolve_context_and_meta( - *, client: GitHubClient, pager: TimelinePager, args: Any + *, + client: GitHubClient, + pager: TimelinePager, + args: Any, + show_resolved_details: bool = False, + show_outdated_details: bool = False, + show_minimized_details: bool = False, + show_details_blocks: bool = False, + diff_hunk_lines: int | None = DEFAULT_DIFF_HUNK_LINES, ) -> tuple[TimelineContext, PullRequestMeta]: page_size = getattr(args, "page_size", None) effective_page_size = DEFAULT_PAGE_SIZE if page_size is None else int(page_size) - diff_hunk_lines = _resolve_diff_hunk_lines(args=args, default=DEFAULT_DIFF_HUNK_LINES) meta = _resolve_pr_meta(client=client, args=args) context, _, _ = pager.build_initial( meta=meta, page_size=effective_page_size, + timeline_window=_resolve_timeline_window(args), + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, diff_hunk_lines=diff_hunk_lines, ) return context, meta +def _resolve_timeline_window(args: Any): + return parse_timeline_window(after=getattr(args, "after", None), before=getattr(args, "before", None)) + + +def _resolve_timeline_page_for_index(*, context: TimelineContext, 
index: int) -> int | None: + if context.timeline_filtered: + for page_number, page in context.filtered_pages.items(): + if index in page.absolute_indexes: + return page_number + return None + if index < 1 or index > context.total_count: + return None + return ((index - 1) // context.page_size) + 1 + + def _resolve_diff_hunk_lines(*, args: Any, default: int) -> int | None: raw = getattr(args, "diff_hunk_lines", None) if raw is None: diff --git a/src/gh_llm/models.py b/src/gh_llm/models.py index 9f4eeb0..14e8f4b 100644 --- a/src/gh_llm/models.py +++ b/src/gh_llm/models.py @@ -52,6 +52,18 @@ class PullRequestMeta: conflict_files: tuple[str, ...] = () +@dataclass(frozen=True) +class TimelineWindow: + after: datetime | None = None + before: datetime | None = None + after_text: str | None = None + before_text: str | None = None + + @property + def active(self) -> bool: + return self.after is not None or self.before is not None + + @dataclass(frozen=True) class PageInfo: has_next_page: bool @@ -82,6 +94,7 @@ class TimelinePage: items: list[TimelineEvent] total_count: int page_info: PageInfo + absolute_indexes: tuple[int, ...] = () @dataclass(frozen=True) @@ -194,7 +207,12 @@ class TimelineContext: is_draft: bool body: str updated_at: str + fetched_at: str = "" timeline_loaded: bool = True + timeline_after: str | None = None + timeline_before: str | None = None + timeline_unfiltered_count: int | None = None + timeline_filtered: bool = False labels: tuple[str, ...] = () kind: str = "pr" pr_reactions_summary: str | None = None @@ -222,6 +240,7 @@ class TimelineContext: conflict_files: tuple[str, ...] 
= () forward_after_by_page: dict[int, str | None] = field(default_factory=lambda: cast("dict[int, str | None]", {})) backward_before_by_page: dict[int, str | None] = field(default_factory=lambda: cast("dict[int, str | None]", {})) + filtered_pages: dict[int, TimelinePage] = field(default_factory=lambda: cast("dict[int, TimelinePage]", {})) def to_dict(self) -> dict[str, object]: return { @@ -238,7 +257,12 @@ def to_dict(self) -> dict[str, object]: "is_draft": self.is_draft, "body": self.body, "updated_at": self.updated_at, + "fetched_at": self.fetched_at, "timeline_loaded": self.timeline_loaded, + "timeline_after": self.timeline_after, + "timeline_before": self.timeline_before, + "timeline_unfiltered_count": self.timeline_unfiltered_count, + "timeline_filtered": self.timeline_filtered, "labels": list(self.labels), "kind": self.kind, "pr_reactions_summary": self.pr_reactions_summary, @@ -284,11 +308,20 @@ def from_dict(cls, value: dict[str, object]) -> TimelineContext: is_draft=bool(value.get("is_draft")), body=_as_str(value.get("body"), ""), updated_at=_as_str(value.get("updated_at"), ""), + fetched_at=_as_str(value.get("fetched_at"), ""), timeline_loaded=( _as_int(value.get("total_pages"), 0) > 0 if value.get("timeline_loaded") is None else bool(value.get("timeline_loaded")) ), + timeline_after=_as_str_optional(value.get("timeline_after")), + timeline_before=_as_str_optional(value.get("timeline_before")), + timeline_unfiltered_count=( + None + if value.get("timeline_unfiltered_count") is None + else _as_int(value.get("timeline_unfiltered_count"), 0) + ), + timeline_filtered=bool(value.get("timeline_filtered")), labels=tuple(_as_str(item, "") for item in _as_list(value.get("labels")) if item), kind=_as_str(value.get("kind"), "pr"), pr_reactions_summary=_as_str_optional(value.get("pr_reactions_summary")), diff --git a/src/gh_llm/pager.py b/src/gh_llm/pager.py index 3ff0336..1a748e7 100644 --- a/src/gh_llm/pager.py +++ b/src/gh_llm/pager.py @@ -3,11 +3,12 @@ import 
math from typing import TYPE_CHECKING -from gh_llm.models import TimelineContext +from gh_llm.commands.options import current_timestamp_utc, parse_timeline_window +from gh_llm.models import PageInfo, TimelineContext, TimelinePage, TimelineWindow if TYPE_CHECKING: from gh_llm.github_api import GitHubClient - from gh_llm.models import PullRequestMeta, TimelinePage + from gh_llm.models import PullRequestMeta, TimelineEvent DEFAULT_PAGE_SIZE = 8 @@ -18,6 +19,12 @@ def build_context_from_meta( *, total_count: int | None = None, total_pages: int | None = None, + fetched_at: str | None = None, + timeline_after: str | None = None, + timeline_before: str | None = None, + timeline_unfiltered_count: int | None = None, + timeline_filtered: bool = False, + filtered_pages: dict[int, TimelinePage] | None = None, ) -> TimelineContext: _validate_page_size(page_size) @@ -39,7 +46,12 @@ def build_context_from_meta( is_draft=meta.is_draft, body=meta.body, updated_at=meta.updated_at, + fetched_at=(current_timestamp_utc() if fetched_at is None else fetched_at), timeline_loaded=timeline_loaded, + timeline_after=timeline_after, + timeline_before=timeline_before, + timeline_unfiltered_count=timeline_unfiltered_count, + timeline_filtered=timeline_filtered, labels=meta.labels, kind=meta.kind, pr_reactions_summary=meta.reactions_summary, @@ -65,8 +77,9 @@ def build_context_from_meta( rebase_merge_allowed=meta.rebase_merge_allowed, co_author_trailers=meta.co_author_trailers, conflict_files=meta.conflict_files, - forward_after_by_page=({1: None} if timeline_loaded else {}), - backward_before_by_page=({resolved_total_pages: None} if timeline_loaded else {}), + forward_after_by_page=({1: None} if timeline_loaded and not timeline_filtered else {}), + backward_before_by_page=({resolved_total_pages: None} if timeline_loaded and not timeline_filtered else {}), + filtered_pages=({} if filtered_pages is None else filtered_pages), ) @@ -79,6 +92,7 @@ def build_initial( meta: PullRequestMeta, page_size: 
int, *, + timeline_window: TimelineWindow | None = None, show_resolved_details: bool = False, show_outdated_details: bool = False, show_minimized_details: bool = False, @@ -87,6 +101,22 @@ def build_initial( diff_hunk_lines: int | None = None, ) -> tuple[TimelineContext, TimelinePage, TimelinePage | None]: _validate_page_size(page_size) + window = TimelineWindow() if timeline_window is None else timeline_window + fetched_at = current_timestamp_utc() + + if window.active: + return self._build_filtered_initial( + meta, + page_size, + timeline_window=window, + fetched_at=fetched_at, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) first_page = self._client.fetch_timeline_forward( meta.ref, @@ -102,12 +132,20 @@ def build_initial( ) total_count = first_page.total_count total_pages = _page_count(total_count, page_size) + first_page = _with_absolute_indexes( + first_page, + page=1, + total_count=total_count, + total_pages=total_pages, + default_page_size=page_size, + ) context = build_context_from_meta( meta=meta, page_size=page_size, total_count=total_count, total_pages=total_pages, + fetched_at=fetched_at, ) self._remember_forward(context, page=1, cursor_used=None, page_result=first_page) @@ -129,6 +167,13 @@ def build_initial( diff_hunk_lines=diff_hunk_lines, kind=meta.kind, ) + last_page = _with_absolute_indexes( + last_page, + page=total_pages, + total_count=total_count, + total_pages=total_pages, + default_page_size=page_size, + ) self._remember_backward(context, page=total_pages, cursor_used=None, page_result=last_page) return context, first_page, last_page @@ -146,6 +191,18 @@ def fetch_page( diff_hunk_lines: int | None = None, ) -> TimelinePage: _validate_page(page, context.total_pages) + if context.timeline_filtered: + return 
self._fetch_filtered_page( + meta, + context, + page, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) from_start = page - 1 from_end = context.total_pages - page @@ -173,6 +230,280 @@ def fetch_page( diff_hunk_lines=diff_hunk_lines, ) + def _fetch_filtered_page( + self, + meta: PullRequestMeta, + context: TimelineContext, + page: int, + *, + show_resolved_details: bool, + show_outdated_details: bool, + show_minimized_details: bool, + show_details_blocks: bool, + review_threads_window: int | None, + diff_hunk_lines: int | None, + ) -> TimelinePage: + timeline_window = parse_timeline_window(after=context.timeline_after, before=context.timeline_before) + if not timeline_window.active: + result = context.filtered_pages.get(page) + if result is None: + raise RuntimeError(f"filtered timeline page {page} is unavailable") + return result + + if timeline_window.after is not None: + collected, _ = self._collect_filtered_from_end( + meta, + page_size=context.page_size, + timeline_window=timeline_window, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) + else: + collected, _ = self._collect_filtered_from_start( + meta, + page_size=context.page_size, + timeline_window=timeline_window, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) + + filtered_pages = _build_local_filtered_pages(collected=collected, 
page_size=context.page_size) + result = filtered_pages.get(page) + if result is None: + raise RuntimeError(f"filtered timeline page {page} is unavailable") + return result + + def _build_filtered_initial( + self, + meta: PullRequestMeta, + page_size: int, + *, + timeline_window: TimelineWindow, + fetched_at: str, + show_resolved_details: bool, + show_outdated_details: bool, + show_minimized_details: bool, + show_details_blocks: bool, + review_threads_window: int | None, + diff_hunk_lines: int | None, + ) -> tuple[TimelineContext, TimelinePage, TimelinePage | None]: + if timeline_window.after is not None: + collected, total_count = self._collect_filtered_from_end( + meta, + page_size=page_size, + timeline_window=timeline_window, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) + else: + collected, total_count = self._collect_filtered_from_start( + meta, + page_size=page_size, + timeline_window=timeline_window, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + ) + + filtered_pages = _build_local_filtered_pages(collected=collected, page_size=page_size) + filtered_total_count = len(collected) + filtered_total_pages = _page_count(filtered_total_count, page_size) + context = build_context_from_meta( + meta=meta, + page_size=page_size, + total_count=filtered_total_count, + total_pages=filtered_total_pages, + fetched_at=fetched_at, + timeline_after=timeline_window.after_text, + timeline_before=timeline_window.before_text, + timeline_unfiltered_count=total_count, + timeline_filtered=True, + filtered_pages=filtered_pages, + ) + first_page = 
filtered_pages[1] + last_page = None if filtered_total_pages == 1 else filtered_pages[filtered_total_pages] + return context, first_page, last_page + + def _collect_filtered_from_end( + self, + meta: PullRequestMeta, + *, + page_size: int, + timeline_window: TimelineWindow, + show_resolved_details: bool, + show_outdated_details: bool, + show_minimized_details: bool, + show_details_blocks: bool, + review_threads_window: int | None, + diff_hunk_lines: int | None, + ) -> tuple[list[tuple[int, TimelineEvent]], int]: + seed_page = self._client.fetch_timeline_backward( + meta.ref, + page_size=1, + before=None, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + kind=meta.kind, + ) + total_count = seed_page.total_count + total_pages = _page_count(total_count, page_size) + last_page_size = _page_size_for_page( + page=total_pages, + total_count=total_count, + total_pages=total_pages, + default_size=page_size, + ) + + page_number = total_pages + current_page = seed_page + if last_page_size != 1: + current_page = self._client.fetch_timeline_backward( + meta.ref, + page_size=last_page_size, + before=None, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + kind=meta.kind, + ) + current_page = _with_absolute_indexes( + current_page, + page=page_number, + total_count=total_count, + total_pages=total_pages, + default_page_size=page_size, + ) + + collected: list[tuple[int, TimelineEvent]] = [] + while True: + collected.extend(_matching_items(current_page, timeline_window)) + if not current_page.page_info.has_previous_page: + break + if ( + 
current_page.items + and timeline_window.after is not None + and current_page.items[0].timestamp <= timeline_window.after + ): + break + + before_cursor = current_page.page_info.start_cursor + if before_cursor is None: + raise RuntimeError("timeline backward cursor unexpectedly missing") + page_number -= 1 + current_page = self._client.fetch_timeline_backward( + meta.ref, + page_size=_page_size_for_page( + page=page_number, + total_count=total_count, + total_pages=total_pages, + default_size=page_size, + ), + before=before_cursor, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + diff_hunk_lines=diff_hunk_lines, + kind=meta.kind, + ) + current_page = _with_absolute_indexes( + current_page, + page=page_number, + total_count=total_count, + total_pages=total_pages, + default_page_size=page_size, + ) + + collected.sort(key=lambda item: item[0]) + return collected, total_count + + def _collect_filtered_from_start( + self, + meta: PullRequestMeta, + *, + page_size: int, + timeline_window: TimelineWindow, + show_resolved_details: bool, + show_outdated_details: bool, + show_minimized_details: bool, + show_details_blocks: bool, + review_threads_window: int | None, + diff_hunk_lines: int | None, + ) -> tuple[list[tuple[int, TimelineEvent]], int]: + page_number = 1 + after_cursor: str | None = None + total_count: int | None = None + total_pages: int | None = None + collected: list[tuple[int, TimelineEvent]] = [] + + while True: + current_page = self._client.fetch_timeline_forward( + meta.ref, + page_size=page_size, + after=after_cursor, + show_resolved_details=show_resolved_details, + show_outdated_details=show_outdated_details, + show_minimized_details=show_minimized_details, + show_details_blocks=show_details_blocks, + review_threads_window=review_threads_window, + 
diff_hunk_lines=diff_hunk_lines, + kind=meta.kind, + ) + if total_count is None: + total_count = current_page.total_count + total_pages = _page_count(total_count, page_size) + assert total_count is not None + assert total_pages is not None + + current_page = _with_absolute_indexes( + current_page, + page=page_number, + total_count=total_count, + total_pages=total_pages, + default_page_size=page_size, + ) + collected.extend(_matching_items(current_page, timeline_window)) + + if not current_page.page_info.has_next_page: + break + if ( + current_page.items + and timeline_window.before is not None + and current_page.items[-1].timestamp >= timeline_window.before + ): + break + + after_cursor = current_page.page_info.end_cursor + if after_cursor is None: + raise RuntimeError("timeline forward cursor unexpectedly missing") + page_number += 1 + + assert total_count is not None + return collected, total_count + def _walk_forward( self, meta: PullRequestMeta, @@ -203,6 +534,13 @@ def _walk_forward( diff_hunk_lines=diff_hunk_lines, kind=meta.kind, ) + result = _with_absolute_indexes( + result, + page=page, + total_count=context.total_count, + total_pages=context.total_pages, + default_page_size=context.page_size, + ) self._remember_forward(context, page=page, cursor_used=cursor, page_result=result) if page == target_page: return result @@ -247,6 +585,13 @@ def _walk_backward( diff_hunk_lines=diff_hunk_lines, kind=meta.kind, ) + result = _with_absolute_indexes( + result, + page=page, + total_count=context.total_count, + total_pages=context.total_pages, + default_page_size=context.page_size, + ) self._remember_backward(context, page=page, cursor_used=cursor, page_result=result) if page == target_page: return result @@ -292,3 +637,90 @@ def _page_size_for_page(page: int, total_count: int, total_pages: int, default_s if remainder != 0: return remainder return default_size + + +def _matching_items(page: TimelinePage, timeline_window: TimelineWindow) -> list[tuple[int, 
TimelineEvent]]: + return [ + (absolute_index, event) + for absolute_index, event in zip(page.absolute_indexes, page.items, strict=False) + if _matches_window(event.timestamp, timeline_window) + ] + + +def _matches_window(timestamp: object, timeline_window: TimelineWindow) -> bool: + from datetime import datetime + + if not isinstance(timestamp, datetime): + return False + if timeline_window.after is not None and timestamp <= timeline_window.after: + return False + if timeline_window.before is not None and timestamp >= timeline_window.before: + return False + return True + + +def _build_local_filtered_pages( + *, collected: list[tuple[int, TimelineEvent]], page_size: int +) -> dict[int, TimelinePage]: + total_count = len(collected) + total_pages = _page_count(total_count, page_size) + pages: dict[int, TimelinePage] = {} + if not collected: + pages[1] = TimelinePage( + items=[], + total_count=0, + page_info=PageInfo( + has_next_page=False, + has_previous_page=False, + start_cursor=None, + end_cursor=None, + ), + absolute_indexes=(), + ) + return pages + + for page in range(1, total_pages + 1): + start = (page - 1) * page_size + end = min(start + page_size, total_count) + chunk = collected[start:end] + pages[page] = TimelinePage( + items=[event for _, event in chunk], + total_count=total_count, + page_info=PageInfo( + has_next_page=page < total_pages, + has_previous_page=page > 1, + start_cursor=None, + end_cursor=None, + ), + absolute_indexes=tuple(index for index, _ in chunk), + ) + return pages + + +def _with_absolute_indexes( + page_result: TimelinePage, + *, + page: int, + total_count: int, + total_pages: int, + default_page_size: int, +) -> TimelinePage: + start_index = _page_start_index( + page=page, + total_count=total_count, + total_pages=total_pages, + page_size=default_page_size, + item_count=len(page_result.items), + ) + return TimelinePage( + items=page_result.items, + total_count=page_result.total_count, + page_info=page_result.page_info, + 
absolute_indexes=tuple(range(start_index, start_index + len(page_result.items))), + ) + + +def _page_start_index(*, page: int, total_count: int, total_pages: int, page_size: int, item_count: int) -> int: + if page == total_pages and total_count % page_size != 0: + return max(1, total_count - item_count + 1) + return ((page - 1) * page_size) + 1 diff --git a/src/gh_llm/render.py b/src/gh_llm/render.py index c26af81..d7adfe7 100644 --- a/src/gh_llm/render.py +++ b/src/gh_llm/render.py @@ -35,11 +35,19 @@ def render_frontmatter(context: TimelineContext) -> list[str]: f"labels: {json.dumps(list(context.labels), ensure_ascii=False)}", f"draft: {str(context.is_draft).lower()}", f"updated_at: {context.updated_at}", + f"fetched_at: {context.fetched_at}", ] if context.timeline_loaded: lines.extend( [ f"timeline_events: {context.total_count}", + *( + [f"timeline_events_unfiltered: {context.timeline_unfiltered_count}"] + if context.timeline_filtered + else [] + ), + *([f"timeline_after: {context.timeline_after}"] if context.timeline_after else []), + *([f"timeline_before: {context.timeline_before}"] if context.timeline_before else []), f"page_size: {context.page_size}", f"total_pages: {context.total_pages}", ] @@ -104,9 +112,10 @@ def render_page(page_number: int, context: TimelineContext, page: TimelinePage) lines.append("(no events on this page)") return lines - start_index = _page_start_index(page_number=page_number, context=context, page=page) - for offset, item in enumerate(page.items): - lines.extend(_render_item(index=start_index + offset, event=item, context=context, command_group=context.kind)) + for index, item in zip( + _page_indexes(page_number=page_number, context=context, page=page), page.items, strict=False + ): + lines.extend(_render_item(index=index, event=item, context=context, command_group=context.kind)) return lines @@ -374,6 +383,7 @@ def render_hidden_gap(context: TimelineContext, hidden_pages: list[int]) -> list return [] repo = 
f"{context.owner}/{context.name}" selector_name = "issue" if context.kind == "issue" else "pr" + filter_flags = _timeline_filter_flags(context) hidden_label = ( f"Hidden timeline page: {hidden_pages[0]}" if len(hidden_pages) == 1 @@ -384,7 +394,7 @@ def render_hidden_gap(context: TimelineContext, hidden_pages: list[int]) -> list hidden_label, *[ _render_template( - t"- ⏎ `{display_command_with(f'{context.kind} timeline-expand {page} --{selector_name} {context.number} --repo {repo}')}`" + t"- ⏎ `{display_command_with(f'{context.kind} timeline-expand {page} --{selector_name} {context.number} --repo {repo}{filter_flags}')}`" ) for page in hidden_pages ], @@ -395,8 +405,9 @@ def render_hidden_gap(context: TimelineContext, hidden_pages: list[int]) -> list def _render_item(index: int, event: TimelineEvent, context: TimelineContext, command_group: str) -> list[str]: timestamp = event.timestamp.astimezone(UTC).strftime("%Y-%m-%d %H:%M UTC") selector_name = "issue" if command_group == "issue" else "pr" + filter_flags = _timeline_filter_flags(context) details_expand_cmd = display_command_with( - f"{command_group} details-expand {index} --{selector_name} {context.number} --repo {context.owner}/{context.name}" + f"{command_group} details-expand {index} --{selector_name} {context.number} --repo {context.owner}/{context.name}{filter_flags}" ) details_action = f"⏎ run `{details_expand_cmd}`" display_summary = (event.summary or "").replace( @@ -456,12 +467,28 @@ def _render_template(template: Template) -> str: return "".join(rendered) +def _page_indexes(page_number: int, context: TimelineContext, page: TimelinePage) -> tuple[int, ...]: + if page.absolute_indexes: + return page.absolute_indexes + start_index = _page_start_index(page_number=page_number, context=context, page=page) + return tuple(range(start_index, start_index + len(page.items))) + + def _page_start_index(page_number: int, context: TimelineContext, page: TimelinePage) -> int: if page_number == context.total_pages and 
context.total_count % context.page_size != 0: return max(1, context.total_count - len(page.items) + 1) return (page_number - 1) * context.page_size + 1 +def _timeline_filter_flags(context: TimelineContext) -> str: + parts: list[str] = [] + if context.timeline_after: + parts.extend(["--after", context.timeline_after]) + if context.timeline_before: + parts.extend(["--before", context.timeline_before]) + return (" " + " ".join(parts)) if parts else "" + + def render_event_detail(index: int, event: TimelineEvent) -> list[str]: timestamp = event.timestamp.astimezone(UTC).strftime("%Y-%m-%d %H:%M UTC") lines = [ diff --git a/tests/test_cli.py b/tests/test_cli.py index 89e9e43..a0cc442 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -740,6 +740,183 @@ def test_view_and_expand_use_real_cursor_pagination( assert "END_MARKER" in out +def test_pr_view_after_filters_incremental_events_and_avoids_forward_bootstrap( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + responder = GhResponder() + monkeypatch.setattr(github_api.subprocess, "run", responder.run) + + code = cli.run( + [ + "pr", + "view", + "77928", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--after", + "2026-02-14T14:44:36Z", + ] + ) + assert code == 0 + + out = capsys.readouterr().out + assert "fetched_at: " in out + assert "timeline_after: 2026-02-14T14:44:36Z" in out + assert "timeline_events: 3" in out + assert "timeline_events_unfiltered: 7" in out + assert "### Page 1/2" in out + assert "### Page 2/2" in out + assert "5. [2026-02-14 14:51 UTC] review/approved by @reviewer" in out + assert "7. 
[2026-02-14 15:11 UTC] comment by @ShigureNyako" in out + assert "commit 2" not in out + assert "comment 2" not in out + + timeline_queries = [ + _extract_form(call, "query") + for call in responder.calls + if call[:3] == ["gh", "api", "graphql"] and "timelineItems(" in _extract_form(call, "query") + ] + assert any("timelineItems(last:" in query for query in timeline_queries) + assert not any("timelineItems(first:" in query for query in timeline_queries) + + +def test_issue_view_before_filters_older_events( + monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] +) -> None: + responder = GhResponder() + monkeypatch.setattr(github_api.subprocess, "run", responder.run) + + code = cli.run( + [ + "issue", + "view", + "77924", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--before", + "2026-02-13T13:30:00Z", + ] + ) + assert code == 0 + + out = capsys.readouterr().out + assert "timeline_before: 2026-02-13T13:30:00Z" in out + assert "timeline_events: 3" in out + assert "timeline_events_unfiltered: 5" in out + assert "### Page 1/2" in out + assert "### Page 2/2" in out + assert "3. 
[2026-02-13 13:00 UTC] comment by @ShigureNyako" in out + assert "issue/closed by @ShigureNyako" not in out + assert "issue/marked-as-duplicate" not in out + + timeline_queries = [ + _extract_form(call, "query") + for call in responder.calls + if call[:3] == ["gh", "api", "graphql"] and "timelineItems(" in _extract_form(call, "query") + ] + assert any("timelineItems(first:" in query for query in timeline_queries) + assert not any("timelineItems(last:" in query for query in timeline_queries) + + +def test_pr_timeline_expand_with_after_uses_filtered_page_numbers( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + monkeypatch.setattr(github_api.subprocess, "run", GhResponder().run) + + code = cli.run( + [ + "pr", + "timeline-expand", + "2", + "--pr", + "77928", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--after", + "2026-02-14T14:44:36Z", + ] + ) + assert code == 0 + + out = capsys.readouterr().out + assert "timeline_after: 2026-02-14T14:44:36Z" in out + assert "### Page 2/2" in out + assert "7. 
[2026-02-14 15:11 UTC] comment by @ShigureNyako" in out + assert "review/APPROVED by @reviewer" not in out + assert "comment-edit c3 --body '' --pr 77928 --repo PaddlePaddle/Paddle" in out + + +def test_pr_timeline_expand_with_after_and_expand_option( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + monkeypatch.setattr(github_api.subprocess, "run", GhResponder().run) + + code = cli.run( + [ + "pr", + "timeline-expand", + "1", + "--pr", + "77928", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--after", + "2026-02-14T14:44:36Z", + "--expand", + "resolved,minimized", + ] + ) + assert code == 0 + + out = capsys.readouterr().out + assert "timeline_after: 2026-02-14T14:44:36Z" in out + assert "### Page 1/2" in out + assert "Review comments (3/3 shown):" in out + assert "PRRT_mock_1" in out + assert "PRRT_mock_2" in out + assert "(review hidden: outdated)" not in out + assert "resolved review comments are collapsed" not in out + assert "hidden review comments are collapsed" not in out + + +def test_invalid_timeline_window_range_reports_error( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + responder = GhResponder() + monkeypatch.setattr(github_api.subprocess, "run", responder.run) + + code = cli.run( + [ + "issue", + "view", + "77924", + "--repo", + "PaddlePaddle/Paddle", + "--after", + "2026-02-13T14:10:00Z", + "--before", + "2026-02-13T14:10:00Z", + ] + ) + assert code == 1 + + err = capsys.readouterr().err + assert "error: invalid time range: `--after` must be earlier than `--before`" in err + assert responder.calls == [] + + def test_pr_view_show_meta_skips_timeline_bootstrap( monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str], @@ -3708,6 +3885,108 @@ def test_issue_timeline_expand_with_expand_minimized( assert "(comment hidden: outdated)" not in out +def test_issue_timeline_expand_with_before_and_expand_minimized( + monkeypatch: pytest.MonkeyPatch, capsys: 
pytest.CaptureFixture[str] +) -> None: + monkeypatch.setattr(github_api.subprocess, "run", GhResponder().run) + + code = cli.run( + [ + "issue", + "timeline-expand", + "1", + "--issue", + "77924", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--before", + "2026-02-13T13:30:00Z", + "--expand", + "minimized", + ] + ) + assert code == 0 + out = capsys.readouterr().out + assert "timeline_before: 2026-02-13T13:30:00Z" in out + assert "### Page 1/2" in out + assert "(comment hidden: outdated)" not in out + assert "cross-reference by @alice (Alice)" in out + + +def test_issue_details_expand_with_before_uses_filtered_expanded_page( + monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str] +) -> None: + def issue_events_with_details() -> list[dict[str, Any]]: + return [ + { + "__typename": "IssueComment", + "id": "ic0", + "url": "https://example.com/ic0", + "createdAt": "2026-02-13T10:00:00Z", + "body": "older issue comment", + "isMinimized": False, + "minimizedReason": None, + "author": {"login": "alice"}, + "reactionGroups": [], + }, + { + "__typename": "IssueComment", + "id": "ic_details", + "url": "https://example.com/ic-details", + "createdAt": "2026-02-13T11:00:00Z", + "body": "intro\n
<details>\n<summary>why</summary>\nfiltered details body\n</details>\n
", + "isMinimized": True, + "minimizedReason": "OUTDATED", + "author": {"login": "bot"}, + "reactionGroups": [], + }, + { + "__typename": "IssueComment", + "id": "ic2", + "url": "https://example.com/ic2", + "createdAt": "2026-02-13T13:00:00Z", + "body": "later issue comment", + "isMinimized": False, + "minimizedReason": None, + "author": {"login": "ShigureNyako"}, + "reactionGroups": [], + }, + { + "__typename": "ClosedEvent", + "id": "iclose1", + "createdAt": "2026-02-13T14:00:00Z", + "actor": {"login": "ShigureNyako"}, + }, + ] + + monkeypatch.setattr(github_api.subprocess, "run", GhResponder().run) + monkeypatch.setattr(sys.modules[__name__], "_issue_events", issue_events_with_details) + + code = cli.run( + [ + "issue", + "details-expand", + "2", + "--issue", + "77924", + "--repo", + "PaddlePaddle/Paddle", + "--page-size", + "2", + "--before", + "2026-02-13T13:30:00Z", + ] + ) + assert code == 0 + out = capsys.readouterr().out + assert "## Details Blocks for Event 2" in out + assert "why" in out + assert "filtered details body" in out + assert "(details body collapsed)" not in out + + def test_pr_view_show_timeline_only(monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]) -> None: monkeypatch.setattr(github_api.subprocess, "run", GhResponder().run)