Benchmark #695
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Benchmark workflow: runs build, embedding, query and incremental benchmarks
# either automatically after a successful "Publish" workflow run, or on demand
# via workflow_dispatch (optionally against a published npm version).
name: Benchmark

on:
  workflow_run:
    workflows: ["Publish"]
    types: [completed]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to benchmark ("dev" for local, or semver like "2.4.0" for npm)'
        required: false
        default: "dev"

# Default to zero permissions; each job grants only what it needs.
permissions: {}

jobs:
| build-benchmark: | |
| runs-on: ubuntu-latest | |
| if: >- | |
| github.event_name == 'workflow_dispatch' || | |
| (github.event.workflow_run.conclusion == 'success' && | |
| github.event.workflow_run.event != 'push') | |
| permissions: | |
| actions: read | |
| contents: write | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@v6 | |
| with: | |
| fetch-depth: 0 | |
| ref: main | |
| token: ${{ secrets.GITHUB_TOKEN }} | |
| - uses: actions/setup-node@v6 | |
| with: | |
| node-version: "22" | |
| cache: "npm" | |
| - name: Install dependencies | |
| run: npm install --prefer-offline --no-audit --no-fund | |
| - name: Determine benchmark mode | |
| id: mode | |
| run: | | |
| if [ "${{ github.event_name }}" = "workflow_run" ]; then | |
| # Release — find latest semver tag | |
| TAG=$(git tag --sort=-version:refname --list 'v[0-9]*.[0-9]*.[0-9]*' | grep -v dev | head -1) | |
| VERSION="${TAG#v}" | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=$VERSION" >> "$GITHUB_OUTPUT" | |
| elif [ "${{ inputs.version }}" = "dev" ] || [ -z "${{ inputs.version }}" ]; then | |
| echo "source=local" >> "$GITHUB_OUTPUT" | |
| echo "version=dev" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=${{ inputs.version }}" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Check for existing benchmark | |
| id: existing | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| VERSION_RE="${VERSION//./\\.}" | |
| if [ "$VERSION" = "dev" ]; then | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| elif grep -qP '"version":\s*"'"$VERSION_RE"'"' generated/benchmarks/BUILD-BENCHMARKS.md 2>/dev/null; then | |
| echo "Benchmark for $VERSION already exists in BUILD-BENCHMARKS.md — skipping" | |
| echo "skip=true" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Wait for npm propagation | |
| if: steps.existing.outputs.skip != 'true' && steps.mode.outputs.source == 'npm' | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| echo "Waiting for @optave/codegraph@${VERSION} on npm..." | |
| for i in $(seq 1 20); do | |
| if npm view "@optave/codegraph@${VERSION}" version 2>/dev/null; then | |
| echo "Package available on npm" | |
| exit 0 | |
| fi | |
| echo " Attempt $i/20 — not yet available, waiting 30s..." | |
| sleep 30 | |
| done | |
| echo "::error::Package @optave/codegraph@${VERSION} not found on npm after 10 minutes" | |
| exit 1 | |
| - name: Run build benchmark | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| ARGS="--version ${{ steps.mode.outputs.version }}" | |
| if [ "${{ steps.mode.outputs.source }}" = "npm" ]; then | |
| ARGS="$ARGS --npm" | |
| fi | |
| node $STRIP_FLAG --import ./scripts/ts-resolve-loader.js scripts/benchmark.ts $ARGS > benchmark-result.json | |
| - name: Run resolution benchmark | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| ARGS="--version ${{ steps.mode.outputs.version }}" | |
| if [ "${{ steps.mode.outputs.source }}" = "npm" ]; then | |
| ARGS="$ARGS --npm" | |
| fi | |
| node $STRIP_FLAG --import ./scripts/ts-resolve-loader.js scripts/resolution-benchmark.ts $ARGS > resolution-result.json | |
| - name: Gate on resolution thresholds | |
| if: steps.existing.outputs.skip != 'true' | |
| timeout-minutes: 30 | |
| run: npx vitest run tests/benchmarks/resolution/resolution-benchmark.test.ts --reporter=verbose | |
| - name: Setup Python (for tracer validation) | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/setup-python@v5 | |
| with: | |
| python-version: "3.12" | |
| - name: Setup Go (for tracer validation) | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/setup-go@v5 | |
| with: | |
| go-version: "stable" | |
| cache: false | |
| - name: Run tracer validation (same-file edge recall) | |
| if: steps.existing.outputs.skip != 'true' | |
| timeout-minutes: 10 | |
| run: npx vitest run tests/benchmarks/resolution/tracer/tracer-validation.test.ts --reporter=verbose | |
| - name: Merge resolution into build result | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| node -e " | |
| const fs = require('fs'); | |
| const build = JSON.parse(fs.readFileSync('benchmark-result.json', 'utf8')); | |
| const resolution = JSON.parse(fs.readFileSync('resolution-result.json', 'utf8')); | |
| build.resolution = resolution; | |
| fs.writeFileSync('benchmark-result.json', JSON.stringify(build, null, 2)); | |
| " | |
| - name: Update build report | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| node $STRIP_FLAG scripts/update-benchmark-report.ts benchmark-result.json | |
| - name: Upload build result | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/upload-artifact@v7 | |
| with: | |
| name: build-benchmark-result | |
| path: benchmark-result.json | |
| - name: Check for changes | |
| if: steps.existing.outputs.skip != 'true' | |
| id: changes | |
| run: | | |
| CHANGED=false | |
| # Detect modified tracked files | |
| if ! git diff --quiet HEAD -- generated/benchmarks/BUILD-BENCHMARKS.md README.md 2>/dev/null; then | |
| CHANGED=true | |
| fi | |
| # Detect newly created (untracked) files | |
| if [ -n "$(git ls-files --others --exclude-standard generated/benchmarks/BUILD-BENCHMARKS.md)" ]; then | |
| CHANGED=true | |
| fi | |
| echo "changed=$CHANGED" >> "$GITHUB_OUTPUT" | |
| - name: Commit and push via PR | |
| if: steps.existing.outputs.skip != 'true' && steps.changes.outputs.changed == 'true' | |
| env: | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| VERSION: ${{ steps.mode.outputs.version }} | |
| run: | | |
| git config user.name "github-actions[bot]" | |
| git config user.email "github-actions[bot]@users.noreply.github.com" | |
| if [ "$VERSION" = "dev" ]; then | |
| BRANCH="benchmark/build-dev-$(date +%Y%m%d-%H%M%S)" | |
| else | |
| BRANCH="benchmark/build-v${VERSION}-$(date +%Y%m%d-%H%M%S)" | |
| fi | |
| git checkout -b "$BRANCH" | |
| git add generated/benchmarks/BUILD-BENCHMARKS.md README.md | |
| git commit -m "docs: update build performance benchmarks (${VERSION})" | |
| git push origin "$BRANCH" | |
| TITLE="docs: update build performance benchmarks (${VERSION})" | |
| if gh pr list --state open --json title --jq ".[].title" | grep -qF "$TITLE"; then | |
| echo "::notice::PR already open for '$TITLE' — skipping" | |
| else | |
| gh pr create \ | |
| --base main \ | |
| --head "$BRANCH" \ | |
| --title "$TITLE" \ | |
| --body "Automated build benchmark update for **${VERSION}** from workflow run [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})." | |
| fi | |
| embedding-benchmark: | |
| runs-on: ubuntu-latest | |
| # 7 models x 20 min each = 140 min worst-case + ~30 min setup/npm-wait headroom | |
| timeout-minutes: 195 | |
| if: >- | |
| github.event_name == 'workflow_dispatch' || | |
| (github.event.workflow_run.conclusion == 'success' && | |
| github.event.workflow_run.event != 'push') | |
| permissions: | |
| actions: read | |
| contents: write | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@v6 | |
| with: | |
| fetch-depth: 0 | |
| ref: main | |
| token: ${{ secrets.GITHUB_TOKEN }} | |
| - uses: actions/setup-node@v6 | |
| with: | |
| node-version: "22" | |
| cache: "npm" | |
| - name: Install dependencies | |
| run: npm install --prefer-offline --no-audit --no-fund | |
| - name: Determine benchmark mode | |
| id: mode | |
| run: | | |
| if [ "${{ github.event_name }}" = "workflow_run" ]; then | |
| TAG=$(git tag --sort=-version:refname --list 'v[0-9]*.[0-9]*.[0-9]*' | grep -v dev | head -1) | |
| VERSION="${TAG#v}" | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=$VERSION" >> "$GITHUB_OUTPUT" | |
| elif [ "${{ inputs.version }}" = "dev" ] || [ -z "${{ inputs.version }}" ]; then | |
| echo "source=local" >> "$GITHUB_OUTPUT" | |
| echo "version=dev" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=${{ inputs.version }}" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Check for existing benchmark | |
| id: existing | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| VERSION_RE="${VERSION//./\\.}" | |
| if [ "$VERSION" = "dev" ]; then | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| elif grep -qP '"version":\s*"'"$VERSION_RE"'"' generated/benchmarks/EMBEDDING-BENCHMARKS.md 2>/dev/null; then | |
| echo "Benchmark for $VERSION already exists in EMBEDDING-BENCHMARKS.md — skipping" | |
| echo "skip=true" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Wait for npm propagation | |
| if: steps.existing.outputs.skip != 'true' && steps.mode.outputs.source == 'npm' | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| echo "Waiting for @optave/codegraph@${VERSION} on npm..." | |
| for i in $(seq 1 20); do | |
| if npm view "@optave/codegraph@${VERSION}" version 2>/dev/null; then | |
| echo "Package available on npm" | |
| exit 0 | |
| fi | |
| echo " Attempt $i/20 — not yet available, waiting 30s..." | |
| sleep 30 | |
| done | |
| echo "::error::Package @optave/codegraph@${VERSION} not found on npm after 10 minutes" | |
| exit 1 | |
| - name: Cache HuggingFace models | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/cache@v5 | |
| with: | |
| path: ~/.cache/huggingface | |
| key: hf-models-${{ runner.os }}-${{ hashFiles('src/domain/search/**') }} | |
| restore-keys: hf-models-${{ runner.os }}- | |
| - name: Build graph | |
| if: steps.existing.outputs.skip != 'true' | |
| run: npx codegraph build . | |
| - name: Run embedding benchmark | |
| if: steps.existing.outputs.skip != 'true' | |
| timeout-minutes: 160 | |
| env: | |
| HF_TOKEN: ${{ secrets.HF_TOKEN }} | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| ARGS="--version ${{ steps.mode.outputs.version }}" | |
| if [ "${{ steps.mode.outputs.source }}" = "npm" ]; then | |
| ARGS="$ARGS --npm" | |
| fi | |
| node $STRIP_FLAG --import ./scripts/ts-resolve-loader.js scripts/embedding-benchmark.ts $ARGS > embedding-benchmark-result.json | |
| - name: Update embedding report | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| node $STRIP_FLAG scripts/update-embedding-report.ts embedding-benchmark-result.json | |
| - name: Upload embedding result | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/upload-artifact@v7 | |
| with: | |
| name: embedding-benchmark-result | |
| path: embedding-benchmark-result.json | |
| - name: Check for changes | |
| if: steps.existing.outputs.skip != 'true' | |
| id: changes | |
| run: | | |
| CHANGED=false | |
| # Detect modified tracked files | |
| if ! git diff --quiet HEAD -- generated/benchmarks/EMBEDDING-BENCHMARKS.md 2>/dev/null; then | |
| CHANGED=true | |
| fi | |
| # Detect newly created (untracked) files | |
| if [ -n "$(git ls-files --others --exclude-standard generated/benchmarks/EMBEDDING-BENCHMARKS.md)" ]; then | |
| CHANGED=true | |
| fi | |
| echo "changed=$CHANGED" >> "$GITHUB_OUTPUT" | |
| - name: Commit and push via PR | |
| if: steps.existing.outputs.skip != 'true' && steps.changes.outputs.changed == 'true' | |
| env: | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| VERSION: ${{ steps.mode.outputs.version }} | |
| run: | | |
| git config user.name "github-actions[bot]" | |
| git config user.email "github-actions[bot]@users.noreply.github.com" | |
| if [ "$VERSION" = "dev" ]; then | |
| BRANCH="benchmark/embedding-dev-$(date +%Y%m%d-%H%M%S)" | |
| else | |
| BRANCH="benchmark/embedding-v${VERSION}-$(date +%Y%m%d-%H%M%S)" | |
| fi | |
| git checkout -b "$BRANCH" | |
| git add generated/benchmarks/EMBEDDING-BENCHMARKS.md | |
| git commit -m "docs: update embedding benchmarks (${VERSION})" | |
| git push origin "$BRANCH" | |
| TITLE="docs: update embedding benchmarks (${VERSION})" | |
| if gh pr list --state open --json title --jq ".[].title" | grep -qF "$TITLE"; then | |
| echo "::notice::PR already open for '$TITLE' — skipping" | |
| else | |
| gh pr create \ | |
| --base main \ | |
| --head "$BRANCH" \ | |
| --title "$TITLE" \ | |
| --body "Automated embedding benchmark update for **${VERSION}** from workflow run [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})." | |
| fi | |
| query-benchmark: | |
| runs-on: ubuntu-latest | |
| if: >- | |
| github.event_name == 'workflow_dispatch' || | |
| (github.event.workflow_run.conclusion == 'success' && | |
| github.event.workflow_run.event != 'push') | |
| permissions: | |
| actions: read | |
| contents: write | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@v6 | |
| with: | |
| fetch-depth: 0 | |
| ref: main | |
| token: ${{ secrets.GITHUB_TOKEN }} | |
| - uses: actions/setup-node@v6 | |
| with: | |
| node-version: "22" | |
| cache: "npm" | |
| - name: Install dependencies | |
| run: npm install --prefer-offline --no-audit --no-fund | |
| - name: Determine benchmark mode | |
| id: mode | |
| run: | | |
| if [ "${{ github.event_name }}" = "workflow_run" ]; then | |
| TAG=$(git tag --sort=-version:refname --list 'v[0-9]*.[0-9]*.[0-9]*' | grep -v dev | head -1) | |
| VERSION="${TAG#v}" | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=$VERSION" >> "$GITHUB_OUTPUT" | |
| elif [ "${{ inputs.version }}" = "dev" ] || [ -z "${{ inputs.version }}" ]; then | |
| echo "source=local" >> "$GITHUB_OUTPUT" | |
| echo "version=dev" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=${{ inputs.version }}" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Check for existing benchmark | |
| id: existing | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| VERSION_RE="${VERSION//./\\.}" | |
| if [ "$VERSION" = "dev" ]; then | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| elif grep -qP '"version":\s*"'"$VERSION_RE"'"' generated/benchmarks/QUERY-BENCHMARKS.md 2>/dev/null; then | |
| echo "Benchmark for $VERSION already exists in QUERY-BENCHMARKS.md — skipping" | |
| echo "skip=true" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Wait for npm propagation | |
| if: steps.existing.outputs.skip != 'true' && steps.mode.outputs.source == 'npm' | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| echo "Waiting for @optave/codegraph@${VERSION} on npm..." | |
| for i in $(seq 1 20); do | |
| if npm view "@optave/codegraph@${VERSION}" version 2>/dev/null; then | |
| echo "Package available on npm" | |
| exit 0 | |
| fi | |
| echo " Attempt $i/20 — not yet available, waiting 30s..." | |
| sleep 30 | |
| done | |
| echo "::error::Package @optave/codegraph@${VERSION} not found on npm after 10 minutes" | |
| exit 1 | |
| - name: Run query benchmark | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| ARGS="--version ${{ steps.mode.outputs.version }}" | |
| if [ "${{ steps.mode.outputs.source }}" = "npm" ]; then | |
| ARGS="$ARGS --npm" | |
| fi | |
| node $STRIP_FLAG --import ./scripts/ts-resolve-loader.js scripts/query-benchmark.ts $ARGS > query-benchmark-result.json | |
| - name: Update query report | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| node $STRIP_FLAG scripts/update-query-report.ts query-benchmark-result.json | |
| - name: Upload query result | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/upload-artifact@v7 | |
| with: | |
| name: query-benchmark-result | |
| path: query-benchmark-result.json | |
| - name: Check for changes | |
| if: steps.existing.outputs.skip != 'true' | |
| id: changes | |
| run: | | |
| CHANGED=false | |
| # Detect modified tracked files | |
| if ! git diff --quiet HEAD -- generated/benchmarks/QUERY-BENCHMARKS.md 2>/dev/null; then | |
| CHANGED=true | |
| fi | |
| # Detect newly created (untracked) files | |
| if [ -n "$(git ls-files --others --exclude-standard generated/benchmarks/QUERY-BENCHMARKS.md)" ]; then | |
| CHANGED=true | |
| fi | |
| echo "changed=$CHANGED" >> "$GITHUB_OUTPUT" | |
| - name: Commit and push via PR | |
| if: steps.existing.outputs.skip != 'true' && steps.changes.outputs.changed == 'true' | |
| env: | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| VERSION: ${{ steps.mode.outputs.version }} | |
| run: | | |
| git config user.name "github-actions[bot]" | |
| git config user.email "github-actions[bot]@users.noreply.github.com" | |
| if [ "$VERSION" = "dev" ]; then | |
| BRANCH="benchmark/query-dev-$(date +%Y%m%d-%H%M%S)" | |
| else | |
| BRANCH="benchmark/query-v${VERSION}-$(date +%Y%m%d-%H%M%S)" | |
| fi | |
| git checkout -b "$BRANCH" | |
| git add generated/benchmarks/QUERY-BENCHMARKS.md | |
| git commit -m "docs: update query benchmarks (${VERSION})" | |
| git push origin "$BRANCH" | |
| TITLE="docs: update query benchmarks (${VERSION})" | |
| if gh pr list --state open --json title --jq ".[].title" | grep -qF "$TITLE"; then | |
| echo "::notice::PR already open for '$TITLE' — skipping" | |
| else | |
| gh pr create \ | |
| --base main \ | |
| --head "$BRANCH" \ | |
| --title "$TITLE" \ | |
| --body "Automated query benchmark update for **${VERSION}** from workflow run [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})." | |
| fi | |
| incremental-benchmark: | |
| runs-on: ubuntu-latest | |
| if: >- | |
| github.event_name == 'workflow_dispatch' || | |
| (github.event.workflow_run.conclusion == 'success' && | |
| github.event.workflow_run.event != 'push') | |
| permissions: | |
| actions: read | |
| contents: write | |
| pull-requests: write | |
| steps: | |
| - uses: actions/checkout@v6 | |
| with: | |
| fetch-depth: 0 | |
| ref: main | |
| token: ${{ secrets.GITHUB_TOKEN }} | |
| - uses: actions/setup-node@v6 | |
| with: | |
| node-version: "22" | |
| cache: "npm" | |
| - name: Install dependencies | |
| run: npm install --prefer-offline --no-audit --no-fund | |
| - name: Determine benchmark mode | |
| id: mode | |
| run: | | |
| if [ "${{ github.event_name }}" = "workflow_run" ]; then | |
| TAG=$(git tag --sort=-version:refname --list 'v[0-9]*.[0-9]*.[0-9]*' | grep -v dev | head -1) | |
| VERSION="${TAG#v}" | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=$VERSION" >> "$GITHUB_OUTPUT" | |
| elif [ "${{ inputs.version }}" = "dev" ] || [ -z "${{ inputs.version }}" ]; then | |
| echo "source=local" >> "$GITHUB_OUTPUT" | |
| echo "version=dev" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "source=npm" >> "$GITHUB_OUTPUT" | |
| echo "version=${{ inputs.version }}" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Check for existing benchmark | |
| id: existing | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| VERSION_RE="${VERSION//./\\.}" | |
| if [ "$VERSION" = "dev" ]; then | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| elif grep -qP '"version":\s*"'"$VERSION_RE"'"' generated/benchmarks/INCREMENTAL-BENCHMARKS.md 2>/dev/null; then | |
| echo "Benchmark for $VERSION already exists in INCREMENTAL-BENCHMARKS.md — skipping" | |
| echo "skip=true" >> "$GITHUB_OUTPUT" | |
| else | |
| echo "skip=false" >> "$GITHUB_OUTPUT" | |
| fi | |
| - name: Wait for npm propagation | |
| if: steps.existing.outputs.skip != 'true' && steps.mode.outputs.source == 'npm' | |
| run: | | |
| VERSION="${{ steps.mode.outputs.version }}" | |
| echo "Waiting for @optave/codegraph@${VERSION} on npm..." | |
| for i in $(seq 1 20); do | |
| if npm view "@optave/codegraph@${VERSION}" version 2>/dev/null; then | |
| echo "Package available on npm" | |
| exit 0 | |
| fi | |
| echo " Attempt $i/20 — not yet available, waiting 30s..." | |
| sleep 30 | |
| done | |
| echo "::error::Package @optave/codegraph@${VERSION} not found on npm after 10 minutes" | |
| exit 1 | |
| - name: Run incremental benchmark | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| ARGS="--version ${{ steps.mode.outputs.version }}" | |
| if [ "${{ steps.mode.outputs.source }}" = "npm" ]; then | |
| ARGS="$ARGS --npm" | |
| fi | |
| node $STRIP_FLAG --import ./scripts/ts-resolve-loader.js scripts/incremental-benchmark.ts $ARGS > incremental-benchmark-result.json | |
| - name: Update incremental report | |
| if: steps.existing.outputs.skip != 'true' | |
| run: | | |
| STRIP_FLAG=$(node -e "const [M]=process.versions.node.split('.').map(Number); console.log(M>=23?'--strip-types':'--experimental-strip-types')") | |
| node $STRIP_FLAG scripts/update-incremental-report.ts incremental-benchmark-result.json | |
| - name: Upload incremental result | |
| if: steps.existing.outputs.skip != 'true' | |
| uses: actions/upload-artifact@v7 | |
| with: | |
| name: incremental-benchmark-result | |
| path: incremental-benchmark-result.json | |
| - name: Check for changes | |
| if: steps.existing.outputs.skip != 'true' | |
| id: changes | |
| run: | | |
| CHANGED=false | |
| # Detect modified tracked files | |
| if ! git diff --quiet HEAD -- generated/benchmarks/INCREMENTAL-BENCHMARKS.md 2>/dev/null; then | |
| CHANGED=true | |
| fi | |
| # Detect newly created (untracked) files | |
| if [ -n "$(git ls-files --others --exclude-standard generated/benchmarks/INCREMENTAL-BENCHMARKS.md)" ]; then | |
| CHANGED=true | |
| fi | |
| echo "changed=$CHANGED" >> "$GITHUB_OUTPUT" | |
| - name: Commit and push via PR | |
| if: steps.existing.outputs.skip != 'true' && steps.changes.outputs.changed == 'true' | |
| env: | |
| GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | |
| VERSION: ${{ steps.mode.outputs.version }} | |
| run: | | |
| git config user.name "github-actions[bot]" | |
| git config user.email "github-actions[bot]@users.noreply.github.com" | |
| if [ "$VERSION" = "dev" ]; then | |
| BRANCH="benchmark/incremental-dev-$(date +%Y%m%d-%H%M%S)" | |
| else | |
| BRANCH="benchmark/incremental-v${VERSION}-$(date +%Y%m%d-%H%M%S)" | |
| fi | |
| git checkout -b "$BRANCH" | |
| git add generated/benchmarks/INCREMENTAL-BENCHMARKS.md | |
| git commit -m "docs: update incremental benchmarks (${VERSION})" | |
| git push origin "$BRANCH" | |
| TITLE="docs: update incremental benchmarks (${VERSION})" | |
| if gh pr list --state open --json title --jq ".[].title" | grep -qF "$TITLE"; then | |
| echo "::notice::PR already open for '$TITLE' — skipping" | |
| else | |
| gh pr create \ | |
| --base main \ | |
| --head "$BRANCH" \ | |
| --title "$TITLE" \ | |
| --body "Automated incremental benchmark update for **${VERSION}** from workflow run [#${{ github.run_number }}](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})." | |
| fi |