diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4a28e033..6e603d37 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,15 +8,25 @@ ## Testing - [ ] `npm run lint` +- [ ] `npm run typecheck` - [ ] `npm run build` - [ ] `npm test` +- [ ] `npm run docs:check` - [ ] Not applicable +## Docs Impact + +- [ ] README or docs updated +- [ ] No docs changes needed + ## Compliance Confirmation - [ ] This change stays within the repository scope and OpenAI Terms of Service expectations. - [ ] This change uses official authentication flows only and does not add bypass, scraping, or credential-sharing behavior. - [ ] I updated tests and documentation when the change affected users, maintainers, or repository behavior. +- [ ] No auth, request-routing, or storage paths changed. +- [ ] I manually tested with a real ChatGPT Plus/Pro account. +- Maintainers can apply the `maintainer-live-verified` label after independent live verification. 
## Notes diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..5cd32fd6 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,335 @@ +name: CI + +on: + pull_request: + push: + branches: + - main + merge_group: + +permissions: + contents: read + +concurrency: + group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + CI: true + HUSKY: 0 + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + timeout-minutes: 10 + outputs: + code_changed: ${{ steps.detect.outputs.code_changed }} + docs_changed: ${{ steps.detect.outputs.docs_changed }} + workflow_changed: ${{ steps.detect.outputs.workflow_changed }} + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + fetch-depth: 0 + + - name: Classify changed files + id: detect + shell: bash + env: + EVENT_NAME: ${{ github.event_name }} + BASE_REF: ${{ github.base_ref }} + BEFORE_SHA: ${{ github.event.before }} + run: | + set -euo pipefail + + docs_changed=false + code_changed=false + workflow_changed=false + files=() + + collect_changed_files() { + local range="$1" + + while IFS=$'\t' read -r status first_path second_path; do + [[ -z "${status}" ]] && continue + + case "${status}" in + R*|C*) + [[ -n "${first_path}" ]] && files+=("${first_path}") + [[ -n "${second_path}" ]] && files+=("${second_path}") + ;; + *) + [[ -n "${first_path}" ]] && files+=("${first_path}") + ;; + esac + done < <(git diff --find-renames --name-status "${range}") + } + + if [[ "${EVENT_NAME}" == "pull_request" ]]; then + git fetch --no-tags --depth=1 origin "${BASE_REF}" + collect_changed_files "origin/${BASE_REF}...HEAD" + elif [[ "${EVENT_NAME}" == "push" && -n "${BEFORE_SHA}" && "${BEFORE_SHA}" != "0000000000000000000000000000000000000000" ]]; then + collect_changed_files "${BEFORE_SHA}...HEAD" + else + # No reliable diff range is available on this event. 
docs_changed and + # code_changed are forced below, and workflow_changed intentionally + # stays false unless an actual diff classified workflow files. + : + fi + + for file in "${files[@]}"; do + [[ -z "${file}" ]] && continue + is_docs_markdown=false + + if [[ "${file}" =~ ^(README\.md|CONTRIBUTING\.md|CHANGELOG\.md|SECURITY\.md)$ ]] || [[ "${file}" =~ ^(\.github|config|docs|test)/.+\.(md|markdown)$ ]]; then + docs_changed=true + is_docs_markdown=true + fi + + case "${file}" in + scripts/ci/*) + docs_changed=true + code_changed=true + ;; + .github/workflows/*) + workflow_changed=true + code_changed=true + ;; + *) + if [[ "${is_docs_markdown}" != "true" ]]; then + code_changed=true + fi + ;; + esac + done + + if [[ "${EVENT_NAME}" != "pull_request" ]]; then + # Run full docs/code lanes on non-PR events, but keep workflow_changed + # diff-based so actionlint only runs when workflow files actually change. + docs_changed=true + code_changed=true + fi + + { + echo "docs_changed=${docs_changed}" + echo "code_changed=${code_changed}" + echo "workflow_changed=${workflow_changed}" + } >> "${GITHUB_OUTPUT}" + + lint: + name: lint + needs: changes + if: needs.changes.outputs.code_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Lint + run: npm run lint + + typecheck: + name: typecheck + needs: changes + if: needs.changes.outputs.code_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + 
cache: npm + + - name: Install dependencies + run: npm ci + + - name: Type check + run: npm run typecheck + + build: + name: build + needs: changes + if: needs.changes.outputs.code_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Build + run: npm run build + + unit-linux: + name: unit (linux) + needs: changes + if: needs.changes.outputs.code_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 25 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Run tests + run: npm test + + unit-windows: + name: unit (windows) + needs: changes + if: needs.changes.outputs.code_changed == 'true' + runs-on: windows-latest + timeout-minutes: 30 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Run tests + run: npm test -- --reporter=verbose + + docs-sanity: + name: docs-sanity + needs: changes + if: needs.changes.outputs.docs_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + 
node-version-file: .nvmrc + + # docs-check.js is intentionally limited to Node built-ins, so this lane can skip npm ci. + - name: Verify markdown links and CI badge targets + run: npm run docs:check + + actionlint: + name: actionlint + needs: changes + if: needs.changes.outputs.workflow_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Lint GitHub Actions workflows + uses: docker://rhysd/actionlint@sha256:5457037ba91acd225478edac3d4b32e45cf6c10291e0dabbfd2491c63129afe1 # rhysd/actionlint:1.7.11 linux/amd64 + with: + args: -color + + required-pr: + name: required-pr + needs: + - changes + - lint + - typecheck + - build + - unit-linux + - unit-windows + - docs-sanity + - actionlint + if: always() + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: Evaluate required checks + shell: bash + env: + CHANGES_RESULT: ${{ needs.changes.result }} + CODE_CHANGED: ${{ needs.changes.outputs.code_changed }} + DOCS_CHANGED: ${{ needs.changes.outputs.docs_changed }} + WORKFLOW_CHANGED: ${{ needs.changes.outputs.workflow_changed }} + LINT_RESULT: ${{ needs.lint.result }} + TYPECHECK_RESULT: ${{ needs.typecheck.result }} + BUILD_RESULT: ${{ needs.build.result }} + UNIT_LINUX_RESULT: ${{ needs.unit-linux.result }} + UNIT_WINDOWS_RESULT: ${{ needs.unit-windows.result }} + DOCS_RESULT: ${{ needs.docs-sanity.result }} + ACTIONLINT_RESULT: ${{ needs.actionlint.result }} + run: | + set -euo pipefail + failures=() + + if [[ "${CHANGES_RESULT}" != "success" ]]; then + echo "Changes detection job did not succeed (result: ${CHANGES_RESULT}). Failing gate." 
+ exit 1 + fi + + if [[ "${CODE_CHANGED}" == "true" && "${LINT_RESULT}" != "success" ]]; then + failures+=("lint") + fi + + if [[ "${CODE_CHANGED}" == "true" && "${TYPECHECK_RESULT}" != "success" ]]; then + failures+=("typecheck") + fi + + if [[ "${CODE_CHANGED}" == "true" && "${BUILD_RESULT}" != "success" ]]; then + failures+=("build") + fi + + if [[ "${CODE_CHANGED}" == "true" && "${UNIT_LINUX_RESULT}" != "success" ]]; then + failures+=("unit-linux") + fi + + if [[ "${CODE_CHANGED}" == "true" && "${UNIT_WINDOWS_RESULT}" != "success" ]]; then + failures+=("unit-windows") + fi + + if [[ "${DOCS_CHANGED}" == "true" && "${DOCS_RESULT}" != "success" ]]; then + failures+=("docs-sanity") + fi + + if [[ "${WORKFLOW_CHANGED}" == "true" && "${ACTIONLINT_RESULT}" != "success" ]]; then + failures+=("actionlint") + fi + + if [[ ${#failures[@]} -gt 0 ]]; then + echo "Required checks failed: ${failures[*]}" + exit 1 + fi + + echo "All required PR checks passed." diff --git a/.github/workflows/pr-advisory.yml b/.github/workflows/pr-advisory.yml new file mode 100644 index 00000000..e4d64901 --- /dev/null +++ b/.github/workflows/pr-advisory.yml @@ -0,0 +1,247 @@ +name: PR Advisory + +on: + pull_request: + push: + branches: + - main + schedule: + - cron: "23 6 * * *" + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: advisory-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref || github.run_id }} + cancel-in-progress: true + +env: + CI: true + HUSKY: 0 + +jobs: + detect-dependency-change: + name: detect-dependency-change + runs-on: ubuntu-latest + timeout-minutes: 10 + outputs: + dependency_changed: ${{ steps.detect.outputs.dependency_changed }} + code_changed: ${{ steps.detect.outputs.code_changed }} + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + fetch-depth: 0 + + - name: Detect dependency changes + id: detect + shell: bash + env: + EVENT_NAME: ${{ 
github.event_name }} + BASE_REF: ${{ github.base_ref }} + BEFORE_SHA: ${{ github.event.before }} + run: | + set -euo pipefail + + dependency_changed=false + code_changed=false + files=() + + collect_changed_files() { + local range="$1" + + while IFS=$'\t' read -r status first_path second_path; do + [[ -z "${status}" ]] && continue + + case "${status}" in + R*|C*) + [[ -n "${first_path}" ]] && files+=("${first_path}") + [[ -n "${second_path}" ]] && files+=("${second_path}") + ;; + *) + [[ -n "${first_path}" ]] && files+=("${first_path}") + ;; + esac + done < <(git diff --find-renames --name-status "${range}") + } + + if [[ "${EVENT_NAME}" == "schedule" || "${EVENT_NAME}" == "workflow_dispatch" ]]; then + dependency_changed=true + code_changed=true + elif [[ "${EVENT_NAME}" == "pull_request" ]]; then + git fetch --no-tags --depth=1 origin "${BASE_REF}" + collect_changed_files "origin/${BASE_REF}...HEAD" + elif [[ "${EVENT_NAME}" == "push" && -n "${BEFORE_SHA}" && "${BEFORE_SHA}" != "0000000000000000000000000000000000000000" ]]; then + collect_changed_files "${BEFORE_SHA}...HEAD" + else + dependency_changed=true + code_changed=true + fi + + for file in "${files[@]}"; do + [[ -z "${file}" ]] && continue + is_docs_markdown=false + + if [[ "${file}" =~ ^(README\.md|CONTRIBUTING\.md|CHANGELOG\.md|SECURITY\.md)$ ]] || [[ "${file}" =~ ^(\.github|config|docs|test)/.+\.(md|markdown)$ ]]; then + is_docs_markdown=true + fi + + if [[ "${file}" =~ ^(package\.json|package-lock\.json)$ ]]; then + dependency_changed=true + fi + + if [[ "${is_docs_markdown}" != "true" ]]; then + code_changed=true + fi + done + + { + echo "dependency_changed=${dependency_changed}" + echo "code_changed=${code_changed}" + } >> "${GITHUB_OUTPUT}" + + coverage: + name: coverage + needs: detect-dependency-change + if: needs.detect-dependency-change.outputs.code_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: Check out repository + uses: 
actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Run coverage + id: coverage + continue-on-error: true + run: npm run test:coverage + + - name: Upload coverage artifacts + if: always() + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7 + with: + name: coverage-report-${{ github.run_id }} + path: coverage + if-no-files-found: ignore + + - name: Write coverage summary + if: always() + shell: bash + env: + COVERAGE_OUTCOME: ${{ steps.coverage.outcome }} + run: | + { + echo "## Coverage" + if [[ "${COVERAGE_OUTCOME}" == "success" ]]; then + echo "" + echo "- \`npm run test:coverage\` passed." + else + echo "" + echo "- \`npm run test:coverage\` is currently advisory." + echo "- The command failed during this run. Keep the artifact for inspection and remediate coverage before promoting it to a required gate." + fi + } >> "${GITHUB_STEP_SUMMARY}" + + compat-matrix: + name: compat-matrix (${{ matrix.os }}, Node ${{ matrix.node }}) + needs: detect-dependency-change + if: needs.detect-dependency-change.outputs.code_changed == 'true' + runs-on: ${{ matrix.os }} + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + node: 18 + - os: ubuntu-latest + node: 22 + - os: macos-latest + node: 20 # Intentionally pins the current default advisory lane; update when .nvmrc changes. + - os: windows-latest + node: 18 # Verify Windows + LTS compatibility; antivirus locking is a known risk. 
+ steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version: ${{ matrix.node }} + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Build + run: npm run build + + - name: Run tests + run: npm test + + - name: Write compat summary + if: always() + shell: bash + env: + COMPAT_OUTCOME: ${{ job.status }} + run: | + { + echo "## Compat matrix (${{ matrix.os }}, Node ${{ matrix.node }})" + echo "" + if [[ "${COMPAT_OUTCOME}" == "success" ]]; then + echo "- build + tests passed." + else + echo "- build or tests failed. Review logs before promoting this lane to required." + fi + } >> "${GITHUB_STEP_SUMMARY}" + + dependency-audit: + name: dependency-audit + needs: detect-dependency-change + if: needs.detect-dependency-change.outputs.dependency_changed == 'true' + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - name: Check out repository + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + + - name: Set up Node.js + uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6 + with: + node-version-file: .nvmrc + cache: npm + + - name: Install dependencies + run: npm ci + + - name: Run dependency audit + id: audit + continue-on-error: true + run: npm run audit:ci + + - name: Write dependency audit summary + if: always() + shell: bash + env: + AUDIT_OUTCOME: ${{ steps.audit.outcome }} + run: | + { + echo "## Dependency audit" + if [[ "${AUDIT_OUTCOME}" == "success" ]]; then + echo "" + echo "- \`npm run audit:ci\` passed." + else + echo "" + echo "- \`npm run audit:ci\` is currently advisory." + echo "- The audit found issues or baseline failures. Review the job log before promoting this lane to required status." 
+ fi + } >> "${GITHUB_STEP_SUMMARY}" diff --git a/.github/workflows/pr-governance.yml b/.github/workflows/pr-governance.yml new file mode 100644 index 00000000..6ca8f3fc --- /dev/null +++ b/.github/workflows/pr-governance.yml @@ -0,0 +1,302 @@ +name: PR Governance + +on: + pull_request_target: + types: + - opened + - edited + - labeled + - reopened + - synchronize + - unlabeled + - ready_for_review + +permissions: + contents: read + issues: write + pull-requests: read + +concurrency: + # Serialize governance per PR so label/state mutations observe live state and + # synchronize runs are not canceled before invalidation logic completes. + group: governance-${{ github.event.pull_request.number }} + cancel-in-progress: false + +jobs: + pr-governance: + # SECURITY: this job uses pull_request_target so the GitHub token carries + # issues: write even for fork PRs. DO NOT add a checkout of the PR head ref + # here - doing so would let a fork PR execute arbitrary code with write access. + name: pr-governance + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: Validate PR template and live verification markers + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 + with: + script: | + const owner = context.repo.owner; + const repo = context.repo.repo; + const issueNumber = context.payload.pull_request.number; + const body = context.payload.pull_request.body || ""; + function stripMarkdownCode(markdown) { + const output = []; + const lines = markdown.split(/\r?\n/); + let inFence = false; + let fenceChar = ""; + let fenceLength = 0; + + for (const line of lines) { + if (!inFence) { + const openingFence = line.match(/^(?: {0,3})(`{3,}|~{3,})[^\n]*$/); + if (openingFence) { + inFence = true; + fenceChar = openingFence[1][0]; + fenceLength = openingFence[1].length; + output.push(""); + continue; + } + + output.push(line); + continue; + } + + const closingFence = new RegExp(`^(?: {0,3})${fenceChar}{${fenceLength},}[^\\n]*$`); + if 
(closingFence.test(line)) { + inFence = false; + fenceChar = ""; + fenceLength = 0; + } + + output.push(""); + } + + return output.join("\n").replace(/(`+)([^`\n]|`(?!\1))*\1/g, ""); + } + + const strippedBody = stripMarkdownCode(body.replace(//g, "")); + const liveLabels = await github.paginate(github.rest.issues.listLabelsOnIssue, { + owner, + repo, + issue_number: issueNumber, + per_page: 100, + }); + const currentLabels = new Set( + liveLabels + .map((label) => label?.name) + .filter((name) => typeof name === "string"), + ); + const files = await github.paginate(github.rest.pulls.listFiles, { + owner, + repo, + pull_number: issueNumber, + per_page: 100, + }); + + const changedFiles = files.map((file) => file.filename); + const riskyMatchers = [ + /^index\.ts$/, + /^lib\/auth\//, + /^lib\/request\//, + /^lib\/storage(?:\/|\.ts$)/, + /^lib\/recovery\/storage\.ts$/, + ]; + + const riskyPaths = files + .filter((file) => + [file.filename, file.previous_filename].some( + (candidatePath) => candidatePath && riskyMatchers.some((matcher) => matcher.test(candidatePath)) + ) + ) + .map((file) => file.filename); + const liveVerificationRequired = riskyPaths.length > 0; + + const requiredHeadings = [ + /^## Summary\s*$/m, + /^## Testing\s*$/m, + /^## Docs Impact\s*$/m, + /^## Compliance Confirmation\s*$/m, + /^## Notes\s*$/m, + ]; + + const missingSections = requiredHeadings.filter((pattern) => !pattern.test(strippedBody)); + function getSectionContent(markdown, heading) { + const headingPattern = new RegExp(`^## ${heading}\\s*$`, "m"); + const match = headingPattern.exec(markdown); + if (!match) return ""; + + const sectionStart = match.index + match[0].length; + const nextHeadingMatch = /^##\s+/m.exec(markdown.slice(sectionStart)); + const sectionEnd = nextHeadingMatch ? 
sectionStart + nextHeadingMatch.index : markdown.length; + return markdown.slice(sectionStart, sectionEnd); + } + + function hasCheckedChecklistLine(section, itemText) { + const escapedText = itemText.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + return new RegExp(`^\\s*-\\s*\\[x\\]\\s+${escapedText}\\s*$`, "im").test(section); + } + + const docsImpactSection = getSectionContent(strippedBody, "Docs Impact"); + const complianceSection = getSectionContent(strippedBody, "Compliance Confirmation"); + const complianceChecked = hasCheckedChecklistLine( + complianceSection, + "This change stays within the repository scope and OpenAI Terms of Service expectations.", + ); + const docsImpactChecked = + hasCheckedChecklistLine(docsImpactSection, "README or docs updated") || + hasCheckedChecklistLine(docsImpactSection, "No docs changes needed"); + const officialAuthChecked = hasCheckedChecklistLine( + complianceSection, + "This change uses official authentication flows only and does not add bypass, scraping, or credential-sharing behavior.", + ); + const testsDocsChecked = hasCheckedChecklistLine( + complianceSection, + "I updated tests and documentation when the change affected users, maintainers, or repository behavior.", + ); + const noLiveRequiredChecked = hasCheckedChecklistLine( + complianceSection, + "No auth, request-routing, or storage paths changed.", + ); + const manualLiveChecked = hasCheckedChecklistLine( + complianceSection, + "I manually tested with a real ChatGPT Plus/Pro account.", + ); + const liveVerificationLabelName = "needs-live-verification"; + const maintainerVerifiedLabelName = "maintainer-live-verified"; + const invalidateMaintainerVerification = + context.payload.action === "synchronize" || context.payload.action === "reopened"; + let maintainerLiveVerified = currentLabels.has(maintainerVerifiedLabelName); + + async function ensureLabel(name, color, description) { + try { + await github.rest.issues.getLabel({ owner, repo, name }); + } catch (error) { + 
if (error.status !== 404) throw error; + try { + await github.rest.issues.createLabel({ + owner, + repo, + name, + color, + description, + }); + } catch (createError) { + if (createError.status === 422) return; + if (String(createError.message ?? "").includes("already_exists")) return; + throw createError; + } + } + } + + if (liveVerificationRequired) { + await ensureLabel( + maintainerVerifiedLabelName, + "0e8a16", + "Maintainer completed independent live ChatGPT Plus/Pro verification", + ); + await ensureLabel( + liveVerificationLabelName, + "b60205", + "Manual ChatGPT Plus/Pro verification required before merge", + ); + await github.rest.issues.addLabels({ + owner, + repo, + issue_number: issueNumber, + labels: [liveVerificationLabelName], + }); + + if (invalidateMaintainerVerification && maintainerLiveVerified) { + try { + await github.rest.issues.removeLabel({ + owner, + repo, + issue_number: issueNumber, + name: maintainerVerifiedLabelName, + }); + } catch (error) { + if (error.status !== 404) throw error; + } + maintainerLiveVerified = false; + } + } else { + try { + await github.rest.issues.removeLabel({ + owner, + repo, + issue_number: issueNumber, + name: liveVerificationLabelName, + }); + } catch (error) { + if (error.status !== 404) throw error; + } + + if (invalidateMaintainerVerification && maintainerLiveVerified) { + try { + await github.rest.issues.removeLabel({ + owner, + repo, + issue_number: issueNumber, + name: maintainerVerifiedLabelName, + }); + } catch (error) { + if (error.status !== 404) throw error; + } + maintainerLiveVerified = false; + } + } + + const failures = []; + + if (missingSections.length > 0) { + failures.push("PR description is missing one or more required template sections."); + } + + if (!complianceChecked) { + failures.push("The compliance checkbox must be checked."); + } + + if (!docsImpactChecked) { + failures.push("Check one of the Docs Impact options."); + } + + if (!officialAuthChecked) { + failures.push("The 
official-authentication checkbox must be checked."); + } + + if (!testsDocsChecked) { + failures.push("The tests/documentation checkbox must be checked."); + } + + if (liveVerificationRequired) { + if (!manualLiveChecked && !maintainerLiveVerified) { + failures.push("This PR touches auth/request/storage paths and needs either the manual live-test checkbox or the maintainer-live-verified label."); + } + } else if (!noLiveRequiredChecked && !manualLiveChecked && !maintainerLiveVerified) { + failures.push('Check either "No auth, request-routing, or storage paths changed." or "I manually tested with a real ChatGPT Plus/Pro account.", or have a maintainer apply the maintainer-live-verified label.'); + } + + const summaryLines = [ + "## PR governance", + "", + `- Changed files: ${changedFiles.length}`, + `- Live verification required: ${liveVerificationRequired ? "yes" : "no"}`, + `- Maintainer verification label present: ${maintainerLiveVerified ? "yes" : "no"}`, + ]; + + if (riskyPaths.length > 0) { + summaryLines.push(`- Risky paths: ${riskyPaths.join(", ")}`); + } + + try { + await core.summary.addRaw(summaryLines.join("\n")).write(); + } catch (summaryError) { + const message = + summaryError instanceof Error ? summaryError.message : String(summaryError); + core.warning(`Failed to write PR governance step summary: ${message}`); + console.log(summaryLines.join("\n")); + } + + if (failures.length > 0) { + core.setFailed(failures.join("\n")); + } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 192bf09b..021f5bd6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -63,14 +63,33 @@ The project does not accept work aimed at: 3. **Include tests** for new functionality 4. **Update documentation** (README.md, config examples, etc.) 5. **Ensure compliance** with guidelines above -6. **Test thoroughly** with the most appropriate validation for the change -7. **Complete the pull request template** with summary, testing, and compliance details -8. 
**Submit PR** with clear description of changes +6. **Run local validation**: `npm run lint`, `npm run typecheck`, `npm run build`, `npm test`, and `npm run docs:check` when docs or workflow files change +7. **Test thoroughly** with the most appropriate validation for the change, including real ChatGPT Plus/Pro checks when touching auth, request-routing, or storage behavior +8. **Complete the pull request template** with summary, testing, docs impact, and compliance details +9. **Submit PR** with clear description of changes Pull requests are automatically screened for incomplete or suspicious submissions. Legitimate contributions are still welcome, but low-signal PRs may be flagged for maintainer review before they move forward. If a PR is flagged incorrectly, a maintainer can override the workflow with the `exempt` label after review. +## CI and PR Checks + +The repository uses split required and advisory PR lanes: + +- Required code validation: `lint`, `typecheck`, `build`, `unit (linux)`, and `unit (windows)` +- Required conditional validation: `docs-sanity` for Markdown changes and `actionlint` for workflow changes +- Required policy checks: `required-pr` and `pr-governance` +- Advisory lanes: `coverage`, `compat-matrix`, and `dependency-audit` + +`required-pr` is the aggregate gate that evaluates the required code, docs, and workflow jobs. `pr-governance` separately enforces the PR template and live-verification requirements for auth, request-routing, and storage changes. Maintainers can record an independent live check by applying the `maintainer-live-verified` label. + +For maintainers, GitHub branch protection should require only: + +- `required-pr` +- `pr-governance` + +Advisory lanes should remain unrequired until their baselines are clean and stable. 
+ ## Reporting Issues When reporting issues, please: diff --git a/README.md b/README.md index 3b9a3d29..083c45d4 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![npm version](https://img.shields.io/npm/v/oc-chatgpt-multi-auth.svg)](https://www.npmjs.com/package/oc-chatgpt-multi-auth) [![npm downloads](https://img.shields.io/npm/dw/oc-chatgpt-multi-auth.svg)](https://www.npmjs.com/package/oc-chatgpt-multi-auth) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE) +[![Tests](https://github.com/ndycode/oc-chatgpt-multi-auth/actions/workflows/ci.yml/badge.svg)](https://github.com/ndycode/oc-chatgpt-multi-auth/actions/workflows/ci.yml) Use your ChatGPT Plus/Pro subscription inside OpenCode with OAuth login, GPT-5/Codex model presets, and multi-account failover. @@ -137,7 +138,9 @@ Short answers for the most common questions live in [docs/faq.md](docs/faq.md), Contributions are welcome if they keep the project accurate, maintainable, and aligned with its personal-use scope. +- Pull requests run split required checks (`lint`, `typecheck`, `build`, Linux and Windows unit tests, docs/workflow validation) plus advisory coverage, compatibility, and dependency-audit lanes. 
- [Contributing Guide](CONTRIBUTING.md) +- [Testing & CI Notes](docs/development/TESTING.md) - [Code of Conduct](CODE_OF_CONDUCT.md) - [Security Policy](SECURITY.md) diff --git a/docs/development/CONFIG_FIELDS.md b/docs/development/CONFIG_FIELDS.md index 465b916a..1b720737 100644 --- a/docs/development/CONFIG_FIELDS.md +++ b/docs/development/CONFIG_FIELDS.md @@ -693,4 +693,4 @@ Notes: - [CONFIG_FLOW.md](./CONFIG_FLOW.md) - Complete config system guide - [ARCHITECTURE.md](./ARCHITECTURE.md) - Technical architecture -- [BUGS_FIXED.md](./BUGS_FIXED.md) - Bug fixes and testing +- [TESTING.md](./TESTING.md) - Validation matrix and CI guidance diff --git a/docs/development/TESTING.md b/docs/development/TESTING.md index a8f8d1a0..829559f6 100644 --- a/docs/development/TESTING.md +++ b/docs/development/TESTING.md @@ -30,9 +30,27 @@ Recommended validation command before release: ```bash npm run lint npm run typecheck +npm run build npm test +npm run docs:check ``` +Current PR automation is split into required and advisory lanes: + +- Required `required-pr`: aggregates `lint`, `typecheck`, `build`, `unit (linux)`, and `unit (windows)` for the default Node version, plus `docs-sanity` for Markdown changes and `actionlint` when workflows change. +- Required `pr-governance`: enforces the pull request template, compliance checkbox, and a completed live-verification marker for auth/request/storage changes. +- Advisory `PR Advisory`: runs `npm run test:coverage`, a wider compatibility matrix (Ubuntu Node 18 and 22, macOS on the default Node version, and Windows Node 18), and `npm run audit:ci`. + +Notes on the advisory lane: + +- `npm run test:coverage` is currently informational because the repo baseline is below the configured global coverage thresholds. +- `npm run audit:ci` is currently informational because the production dependency audit still reports an unresolved `hono` advisory. 
+ +Maintainer branch protection should require only: + +- `required-pr` +- `pr-governance` + ## Test Scenarios Matrix ### Scenario 1: Default OpenCode Models (No Custom Config) @@ -786,6 +804,6 @@ describe('filterInput', () => { ## See Also -- [IMPLEMENTATION_SUMMARY.md](./IMPLEMENTATION_SUMMARY.md) - Complete summary - [CONFIG_FIELDS.md](./CONFIG_FIELDS.md) - Field usage guide -- [BUGS_FIXED.md](./BUGS_FIXED.md) - Bug analysis +- [CONFIG_FLOW.md](./CONFIG_FLOW.md) - Configuration loading and precedence +- [ARCHITECTURE.md](./ARCHITECTURE.md) - Technical architecture and request flow diff --git a/eslint.config.js b/eslint.config.js index d038ed8f..c326b456 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -3,7 +3,7 @@ import tsparser from "@typescript-eslint/parser"; export default [ { - ignores: ["dist/**", "node_modules/**", "winston/**", "*.cjs", "*.mjs"], + ignores: ["coverage/**", "dist/**", "node_modules/**", "winston/**", "*.cjs", "*.mjs"], }, { files: ["index.ts", "lib/**/*.ts"], diff --git a/package.json b/package.json index 3517fc83..47875113 100644 --- a/package.json +++ b/package.json @@ -47,6 +47,7 @@ "test:ui": "vitest --ui", "test:coverage": "vitest run --coverage", "coverage": "vitest run --coverage", + "docs:check": "node scripts/ci/docs-check.js", "audit:prod": "npm audit --omit=dev --audit-level=high", "audit:all": "npm audit --audit-level=high", "audit:dev:allowlist": "node scripts/audit-dev-allowlist.js", diff --git a/scripts/ci/docs-check.js b/scripts/ci/docs-check.js new file mode 100644 index 00000000..c5b8ef1d --- /dev/null +++ b/scripts/ci/docs-check.js @@ -0,0 +1,425 @@ +import { execFileSync } from "node:child_process"; +import { readFileSync } from "node:fs"; +import { access, readdir, readFile, stat } from "node:fs/promises"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const DEFAULT_FILES = ["AGENTS.md", "CHANGELOG.md", "CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "README.md", "SECURITY.md"]; +const 
DEFAULT_DIRS = [".github", "config", "docs", "test"]; +const MARKDOWN_EXTENSIONS = new Set([".md", ".markdown"]); +const IGNORED_DIRS = new Set([".git", ".github/workflows", ".omx", "dist", "node_modules", "tmp"]); +const MARKDOWN_PATH_ESCAPE_PATTERN = /\\([\x20-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E])/g; +const __filename = fileURLToPath(import.meta.url); +const repositorySlugCache = new Map(); + +function getRootDir() { + return process.cwd(); +} + +export function normalizePathForCompare(targetPath) { + const resolved = path.resolve(targetPath); + return process.platform === "win32" ? resolved.toLowerCase() : resolved; +} + +function normalizeReferenceLabel(label) { + return label.trim().replace(/\s+/g, " ").toLowerCase(); +} + +function unescapeMarkdownPathTarget(target) { + return target.replace(MARKDOWN_PATH_ESCAPE_PATTERN, "$1"); +} + +function extractRepositorySlug(repositoryValue) { + if (!repositoryValue) return null; + + const normalizedValue = repositoryValue + .trim() + .replace(/^git\+/, "") + .replace(/^git@github\.com:/i, "https://github.com/") + .replace(/^ssh:\/\/git@github\.com\//i, "https://github.com/") + .replace(/\.git$/i, ""); + + try { + const url = new URL(normalizedValue); + if (!["github.com", "www.github.com"].includes(url.hostname)) return null; + + const match = url.pathname.match(/^\/([^/]+)\/([^/]+?)\/?$/); + if (!match) return null; + return `${match[1]}/${match[2]}`; + } catch { + const match = normalizedValue.match(/github\.com[:/]([^/]+)\/([^/]+?)(?:\/)?$/i); + if (!match) return null; + return `${match[1]}/${match[2]}`; + } +} + +function getRepositorySlug(rootDir = getRootDir()) { + const githubRepository = process.env.GITHUB_REPOSITORY?.trim(); + const cacheKey = `${normalizePathForCompare(rootDir)}::${githubRepository ?? 
""}`; + if (repositorySlugCache.has(cacheKey)) { + return repositorySlugCache.get(cacheKey); + } + + let repositorySlug = null; + if (githubRepository && /^[^/]+\/[^/]+$/.test(githubRepository)) { + repositorySlug = githubRepository; + } else { + try { + const packageJson = JSON.parse(readFileSync(path.join(rootDir, "package.json"), "utf8")); + const repositoryField = + typeof packageJson.repository === "string" ? packageJson.repository : packageJson.repository?.url; + repositorySlug = extractRepositorySlug(repositoryField); + } catch { + // Ignore package.json lookup failures and fall back to git metadata. + } + + if (!repositorySlug) { + try { + const remoteUrl = execFileSync("git", ["config", "--get", "remote.origin.url"], { + cwd: rootDir, + encoding: "utf8", + stdio: ["ignore", "pipe", "ignore"], + }).trim(); + repositorySlug = extractRepositorySlug(remoteUrl); + } catch { + repositorySlug = null; + } + } + } + + repositorySlugCache.set(cacheKey, repositorySlug); + return repositorySlug; +} + +function normalizeLinkTarget(rawTarget) { + if (!rawTarget) return null; + + let target = rawTarget.trim(); + if (!target) return null; + + const angleTargetWithOptionalTitle = target.match(/^<([^>]+)>(?:\s+["'(].*)?$/); + if (angleTargetWithOptionalTitle?.[1]) { + target = angleTargetWithOptionalTitle[1].trim(); + } else { + const spacedTarget = target.match(/^(\S+)\s+["'(].*$/); + if (spacedTarget?.[1]) { + target = spacedTarget[1]; + } + + if (target.startsWith("<") && target.endsWith(">")) { + target = target.slice(1, -1).trim(); + } + } + + target = unescapeMarkdownPathTarget(target); + return target || null; +} + +async function exists(targetPath) { + try { + await access(targetPath); + return true; + } catch { + return false; + } +} + +async function getPathType(targetPath) { + try { + const metadata = await stat(targetPath); + if (metadata.isDirectory()) return "directory"; + if (metadata.isFile()) return "file"; + return "other"; + } catch { + return "missing"; 
+ } +} + +async function walkMarkdownFiles(dirPath, rootDir = getRootDir()) { + const entries = await readdir(dirPath, { withFileTypes: true }); + const files = []; + + for (const entry of entries) { + const absolutePath = path.join(dirPath, entry.name); + const relativePath = path.relative(rootDir, absolutePath).replace(/\\/g, "/"); + + if (entry.isDirectory()) { + if (IGNORED_DIRS.has(relativePath) || IGNORED_DIRS.has(entry.name)) continue; + files.push(...(await walkMarkdownFiles(absolutePath, rootDir))); + continue; + } + + if (MARKDOWN_EXTENSIONS.has(path.extname(entry.name).toLowerCase())) { + files.push(absolutePath); + } + } + + return files; +} + +export async function collectMarkdownFiles(inputPaths, rootDir = getRootDir()) { + const resolved = new Set(); + + if (inputPaths.length > 0) { + for (const inputPath of inputPaths) { + const absolutePath = path.resolve(rootDir, inputPath); + if (!(await exists(absolutePath))) continue; + + const pathType = await getPathType(absolutePath); + const extension = path.extname(absolutePath).toLowerCase(); + if (pathType === "file" && MARKDOWN_EXTENSIONS.has(extension)) { + resolved.add(absolutePath); + continue; + } + + if (pathType !== "directory") continue; + + const nestedFiles = await walkMarkdownFiles(absolutePath, rootDir); + for (const nestedFile of nestedFiles) resolved.add(nestedFile); + } + + return [...resolved].sort(); + } + + for (const file of DEFAULT_FILES) { + const absolutePath = path.join(rootDir, file); + if (await exists(absolutePath)) resolved.add(absolutePath); + } + + for (const dir of DEFAULT_DIRS) { + const absolutePath = path.join(rootDir, dir); + if (!(await exists(absolutePath))) continue; + const nestedFiles = await walkMarkdownFiles(absolutePath, rootDir); + for (const nestedFile of nestedFiles) resolved.add(nestedFile); + } + + return [...resolved].sort(); +} + +function extractLinkTarget(markdown, startIndex) { + let depth = 1; + let inAngleTarget = false; + let isEscaped = false; + let 
target = ""; + + for (let index = startIndex; index < markdown.length; index += 1) { + const char = markdown[index]; + + if (isEscaped) { + target += char; + isEscaped = false; + continue; + } + + if (char === "\\") { + target += char; + isEscaped = true; + continue; + } + + if (inAngleTarget) { + target += char; + if (char === ">") inAngleTarget = false; + continue; + } + + if (char === "<" && target.trim().length === 0) { + target += char; + inAngleTarget = true; + continue; + } + + if (char === "(") { + target += char; + depth += 1; + continue; + } + + if (char === ")") { + depth -= 1; + if (depth === 0) { + return target; + } + target += char; + continue; + } + + target += char; + } + + return null; +} + +export function extractMarkdownLinks(markdown) { + const stripMarkdownCode = (source) => { + const output = []; + const lines = source.split(/\r?\n/); + let inFence = false; + let fenceChar = ""; + let fenceLength = 0; + + for (const line of lines) { + if (!inFence) { + const openingFence = line.match(/^(?: {0,3})(`{3,}|~{3,})[^\n]*$/); + if (openingFence) { + inFence = true; + fenceChar = openingFence[1][0]; + fenceLength = openingFence[1].length; + output.push(""); + continue; + } + + output.push(line); + continue; + } + + const closingFence = new RegExp(`^(?: {0,3})${fenceChar}{${fenceLength},}[^\\n]*$`); + if (closingFence.test(line)) { + inFence = false; + fenceChar = ""; + fenceLength = 0; + } + + output.push(""); + } + + return output.join("\n").replace(/(`+)([^`\n]|`(?!\1))*\1/g, ""); + }; + + const stripped = stripMarkdownCode(markdown.replace(/<!--[\s\S]*?-->/g, "")); + const openerPattern = /!?\[[^\]]*]\(/g; + const referencePattern = /!?\[([^\]]+)]\[([^\]]*)]/g; + const shortcutReferencePattern = /(?
0) { + console.error("docs-check found broken documentation links:"); + for (const failure of failures) { + console.error(`- ${failure}`); + } + process.exitCode = 1; + return; + } + + console.log(`docs-check: verified ${files.length} markdown file(s)`); +} + +const isDirectRun = process.argv[1] + ? normalizePathForCompare(process.argv[1]) === normalizePathForCompare(__filename) + : false; + +if (isDirectRun) { + await main(); +} diff --git a/test/docs-check.test.ts b/test/docs-check.test.ts new file mode 100644 index 00000000..52842a61 --- /dev/null +++ b/test/docs-check.test.ts @@ -0,0 +1,596 @@ +import { execFile } from "node:child_process"; +import { mkdir, mkdtemp, readFile, rm, writeFile } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import path from "node:path"; +import { setTimeout as delay } from "node:timers/promises"; +import { promisify } from "node:util"; +import { afterEach, describe, expect, it } from "vitest"; + +const tempRoots: string[] = []; +const TEMP_CLEANUP_DELAYS_MS = [100, 500, 2000]; +const TEMP_CLEANUP_ATTEMPTS = TEMP_CLEANUP_DELAYS_MS.length + 1; +const DOCS_CHECK_SUBPROCESS_RETRY_DELAYS_MS = [100, 500, 2000]; +const DOCS_CHECK_SUBPROCESS_ATTEMPTS = + DOCS_CHECK_SUBPROCESS_RETRY_DELAYS_MS.length + 1; +const DOCS_CHECK_SUBPROCESS_TIMEOUT_MS = 15_000; +const execFileAsync = promisify(execFile); + +async function cleanupTempRoot(root: string) { + for (let attempt = 1; attempt <= TEMP_CLEANUP_ATTEMPTS; attempt += 1) { + try { + await rm(root, { recursive: true, force: true }); + return; + } catch (error) { + if (attempt === TEMP_CLEANUP_ATTEMPTS) { + const message = error instanceof Error ? error.message : String(error); + console.warn(`[docs-check test] failed to clean up ${root} after ${TEMP_CLEANUP_ATTEMPTS} attempts: ${message}`); + return; + } + + await delay(TEMP_CLEANUP_DELAYS_MS[attempt - 1] ?? TEMP_CLEANUP_DELAYS_MS.at(-1) ?? 
100); + } + } +} + +afterEach(async () => { + await Promise.all(tempRoots.splice(0).map((root) => cleanupTempRoot(root))); +}); + +async function writeFixtureFiles(root: string, files: Record<string, string>) { + tempRoots.push(root); + + for (const [relativePath, contents] of Object.entries(files)) { + const absolutePath = path.join(root, relativePath); + await mkdir(path.dirname(absolutePath), { recursive: true }); + await writeFile(absolutePath, contents, "utf8"); + } +} + +function isTransientDocsCheckSubprocessError(error: unknown) { + const details = [error instanceof Error ? error.message : String(error)]; + if (error && typeof error === "object") { + const typedError = error as { stderr?: string; stdout?: string }; + if (typedError.stderr) details.push(typedError.stderr); + if (typedError.stdout) details.push(typedError.stdout); + } + + return /\b(EPERM|EBUSY|EACCES)\b/i.test(details.join("\n")); +} + +async function runDocsCheckSubprocess( + scriptPath: string, + args: string[], + options: Parameters<typeof execFileAsync>[2], +) { + for ( + let attempt = 1; + attempt <= DOCS_CHECK_SUBPROCESS_ATTEMPTS; + attempt += 1 + ) { + try { + return await execFileAsync(process.execPath, [scriptPath, ...args], options); + } catch (error) { + if ( + process.platform !== "win32" || + attempt === DOCS_CHECK_SUBPROCESS_ATTEMPTS || + !isTransientDocsCheckSubprocessError(error) + ) { + throw error; + } + + await delay( + DOCS_CHECK_SUBPROCESS_RETRY_DELAYS_MS[attempt - 1] ?? + DOCS_CHECK_SUBPROCESS_RETRY_DELAYS_MS.at(-1) ?? + 100, + ); + } + } + + throw new Error("docs-check subprocess retry loop exhausted unexpectedly"); +} + +async function createRepoFixture(files: Record<string, string>) { + // docs-check resolves local links against process.cwd(), so fixtures must live + // under the repo root for relative-link validation to exercise real behavior. + // .gitignore excludes tmp/ and tmp* so a leftover retry-cleanup fixture does + // not pollute git status if Windows holds a transient lock on removal.
+ const repoTempDir = path.join(process.cwd(), "tmp"); + await mkdir(repoTempDir, { recursive: true }); + + const root = await mkdtemp(path.join(repoTempDir, "docs-check-")); + await writeFixtureFiles(root, files); + + return { root }; +} + +async function createExternalFixture(files: Record<string, string>) { + // Workflow badge fallback tests need a directory outside the repo so the git + // remote lookup can cleanly fail when package metadata is absent. + const root = await mkdtemp(path.join(tmpdir(), "docs-check-external-")); + await writeFixtureFiles(root, files); + + return { root }; +} + +async function createDocsFixture(markdown = "# Guide\n") { + const { root } = await createRepoFixture({ + "docs/guide.md": markdown, + "docs/targets/exists.md": "# Target\n", + }); + + const docsFile = path.join(root, "docs", "guide.md"); + + return { docsFile, root }; +} + +describe("docs-check script", () => { + it("keeps balanced parentheses inside markdown link targets", async () => { + const { extractMarkdownLinks } = await import("../scripts/ci/docs-check.js"); + + const markdown = "[Config Guide](docs/guides/config(v2).md)"; + + expect(extractMarkdownLinks(markdown)).toEqual(["docs/guides/config(v2).md"]); + }); + + it("skips anchor-only, external, and site-root-prefixed links", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile } = await createDocsFixture(); + + await expect(validateLink(docsFile, "#section")).resolves.toBeNull(); + await expect(validateLink(docsFile, "https://example.com/docs")).resolves.toBeNull(); + await expect(validateLink(docsFile, "/docs/development/CONFIG_FIELDS.md")).resolves.toBeNull(); + }); + + it("requires an absolute markdown file path", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + + await expect(validateLink("docs/guide.md", "./targets/exists.md", process.cwd())).rejects.toThrow( + 'validateLink: filePath must be absolute, got "docs/guide.md"', + ); + });
+ + it("reports missing workflow badge targets", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile } = await createDocsFixture(); + + await expect( + validateLink( + docsFile, + "https://github.com/ndycode/oc-chatgpt-multi-auth/actions/workflows/does-not-exist.yml/badge.svg", + ), + ).resolves.toBe("Missing workflow referenced by GitHub Actions badge/link: does-not-exist.yml"); + await expect( + validateLink( + docsFile, + "https://github.com/NdyCode/OC-ChatGPT-Multi-Auth/actions/workflows/does-not-exist.yml/badge.svg", + ), + ).resolves.toBe("Missing workflow referenced by GitHub Actions badge/link: does-not-exist.yml"); + await expect( + validateLink( + docsFile, + "https://github.com/octocat/hello-world/actions/workflows/ci.yml/badge.svg", + ), + ).resolves.toBeNull(); + }); + + it("uses package metadata to validate workflow badge targets when GitHub Actions context is unavailable", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { root } = await createExternalFixture({ + "package.json": JSON.stringify( + { + name: "fixture-docs-check", + repository: { + type: "git", + url: "git+https://github.com/example/docs-fixture.git", + }, + }, + null, + 2, + ), + "docs/guide.md": "# Guide\n", + ".github/workflows/ci.yml": "name: CI\non: push\n", + }); + const docsFile = path.join(root, "docs", "guide.md"); + const originalRepository = process.env.GITHUB_REPOSITORY; + delete process.env.GITHUB_REPOSITORY; + + try { + await expect( + validateLink( + docsFile, + "https://github.com/example/docs-fixture/actions/workflows/ci.yml/badge.svg", + root, + ), + ).resolves.toBeNull(); + await expect( + validateLink( + docsFile, + "https://github.com/example/docs-fixture/actions/workflows/missing.yml/badge.svg", + root, + ), + ).resolves.toBe("Missing workflow referenced by GitHub Actions badge/link: missing.yml"); + } finally { + if (originalRepository === undefined) { + delete 
process.env.GITHUB_REPOSITORY; + } else { + process.env.GITHUB_REPOSITORY = originalRepository; + } + } + }); + + it("memoizes repository metadata per root during workflow badge validation", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { root } = await createExternalFixture({ + "package.json": JSON.stringify( + { + name: "fixture-docs-check", + repository: { + type: "git", + url: "git+https://github.com/example/docs-fixture.git", + }, + }, + null, + 2, + ), + "docs/guide.md": "# Guide\n", + }); + const docsFile = path.join(root, "docs", "guide.md"); + const originalRepository = process.env.GITHUB_REPOSITORY; + delete process.env.GITHUB_REPOSITORY; + + try { + await expect( + validateLink( + docsFile, + "https://github.com/example/docs-fixture/actions/workflows/missing.yml/badge.svg", + root, + ), + ).resolves.toBe("Missing workflow referenced by GitHub Actions badge/link: missing.yml"); + + await rm(path.join(root, "package.json")); + + await expect( + validateLink( + docsFile, + "https://github.com/example/docs-fixture/actions/workflows/missing.yml/badge.svg", + root, + ), + ).resolves.toBe("Missing workflow referenced by GitHub Actions badge/link: missing.yml"); + } finally { + if (originalRepository === undefined) { + delete process.env.GITHUB_REPOSITORY; + } else { + process.env.GITHUB_REPOSITORY = originalRepository; + } + } + }); + + it("skips workflow badge validation when repository metadata cannot be resolved", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { root } = await createExternalFixture({ + "docs/guide.md": "# Guide\n", + }); + const docsFile = path.join(root, "docs", "guide.md"); + const originalRepository = process.env.GITHUB_REPOSITORY; + delete process.env.GITHUB_REPOSITORY; + + try { + await expect( + validateLink( + docsFile, + "https://github.com/example/docs-fixture/actions/workflows/ci.yml/badge.svg", + root, + ), + ).resolves.toBeNull(); + } 
finally { + if (originalRepository === undefined) { + delete process.env.GITHUB_REPOSITORY; + } else { + process.env.GITHUB_REPOSITORY = originalRepository; + } + } + }); + + it("resolves relative local targets from the markdown file directory", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile, root } = await createDocsFixture(); + + await expect(validateLink(docsFile, "./targets/exists.md")).resolves.toBeNull(); + await expect(validateLink(docsFile, "./targets/exists.md", root)).resolves.toBeNull(); + await expect(validateLink(docsFile, "./targets/missing.md")).resolves.toBe("Missing local target: ./targets/missing.md"); + await expect(validateLink(docsFile, "../../../../outside.md")).resolves.toBe( + "Local target escapes repository root: ../../../../outside.md", + ); + }); + + it("decodes URL-escaped local paths before checking the filesystem", async () => { + const { validateLink } = await import("../scripts/ci/docs-check.js"); + const { root } = await createRepoFixture({ + "docs/guide.md": "[Space](./My%20Guide.md)\n[Literal](./bad%2Gname.md)\n", + "docs/My Guide.md": "# Decoded path target\n", + "docs/bad%2Gname.md": "# Literal percent target\n", + }); + const docsFile = path.join(root, "docs", "guide.md"); + + await expect(validateLink(docsFile, "./My%20Guide.md", root)).resolves.toBeNull(); + await expect(validateLink(docsFile, "./bad%2Gname.md", root)).resolves.toBeNull(); + }); + + it("unescapes markdown-escaped local targets before checking the filesystem", async () => { + const { extractMarkdownLinks, validateLink } = await import("../scripts/ci/docs-check.js"); + const { root } = await createRepoFixture({ + "docs/guide.md": "[Escaped](./array\\[1\\]\\ \\(v2\\).md)\n", + "docs/array[1] (v2).md": "# Escaped target\n", + }); + const docsFile = path.join(root, "docs", "guide.md"); + const markdown = await readFile(docsFile, "utf8"); + const [linkTarget] = extractMarkdownLinks(markdown); + + 
expect(linkTarget).toBe("./array[1] (v2).md"); + await expect(validateLink(docsFile, linkTarget, root)).resolves.toBeNull(); + }); + + it("normalizes direct-run paths consistently for the current platform", async () => { + const { normalizePathForCompare } = await import("../scripts/ci/docs-check.js"); + + const input = process.platform === "win32" ? "C:\\Temp\\Example\\..\\Test.js" : "./scripts/../README.md"; + const resolved = path.resolve(input); + const expected = process.platform === "win32" ? resolved.toLowerCase() : resolved; + + expect(normalizePathForCompare(input)).toBe(expected); + }); + + it("extracts reference-style definitions so missing targets are still caught", async () => { + const { extractMarkdownLinks, validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile } = await createDocsFixture("[Config Guide][config]\n\n[config]: ./targets/missing.md\n"); + const markdown = await readFile(docsFile, "utf8"); + const [referenceTarget] = extractMarkdownLinks(markdown); + + expect(referenceTarget).toBe("./targets/missing.md"); + await expect(validateLink(docsFile, referenceTarget)).resolves.toBe("Missing local target: ./targets/missing.md"); + }); + + it("extracts shortcut reference links so missing targets are still caught", async () => { + const { extractMarkdownLinks, validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile } = await createDocsFixture("[config]\n\n[config]: ./targets/missing.md\n"); + const markdown = await readFile(docsFile, "utf8"); + const [referenceTarget] = extractMarkdownLinks(markdown); + + expect(referenceTarget).toBe("./targets/missing.md"); + await expect(validateLink(docsFile, referenceTarget)).resolves.toBe("Missing local target: ./targets/missing.md"); + }); + + it("does not treat inline or full reference links as shortcut references", async () => { + const { extractMarkdownLinks } = await import("../scripts/ci/docs-check.js"); + + const markdown = [ + 
"[Inline](./targets/inline.md)", + "[Config][cfg]", + "", + "[inline]: ./targets/inline-shortcut.md", + "[cfg]: ./targets/full.md", + "[config]: ./targets/full-shortcut.md", + "", + ].join("\n"); + + expect(extractMarkdownLinks(markdown)).toEqual(["./targets/inline.md", "./targets/full.md"]); + }); + + it("ignores links that only appear inside HTML comments", async () => { + const { extractMarkdownLinks } = await import("../scripts/ci/docs-check.js"); + + const markdown = "<!-- [Hidden](./targets/missing.md) -->\n[Config Guide](./targets/exists.md)\n"; + + expect(extractMarkdownLinks(markdown)).toEqual(["./targets/exists.md"]); + }); + + it("ignores links that only appear inside tilde-fenced code blocks", async () => { + const { extractMarkdownLinks } = await import("../scripts/ci/docs-check.js"); + + const markdown = "~~~bash\n[missing](./targets/missing.md)\n~~~\n[Config Guide](./targets/exists.md)\n"; + + expect(extractMarkdownLinks(markdown)).toEqual(["./targets/exists.md"]); + }); + + it("ignores links after inner fences that appear inside a larger fenced code block", async () => { + const { extractMarkdownLinks } = await import("../scripts/ci/docs-check.js"); + + const markdown = [ + "````markdown", + "```", + "Use fenced blocks like:", + "```yaml", + "key: value", + "```", + "The link [guide](./targets/missing.md) is here.", + "```", + "````", + "[Config Guide](./targets/exists.md)", + "", + ].join("\n"); + + expect(extractMarkdownLinks(markdown)).toEqual(["./targets/exists.md"]); + }); + + it("accepts angle-bracket targets that include an optional title", async () => { + const { extractMarkdownLinks, validateLink } = await import("../scripts/ci/docs-check.js"); + const { docsFile } = await createDocsFixture('[Config Guide](<./targets/exists.md> "Config target")\n'); + const markdown = await readFile(docsFile, "utf8"); + const [linkTarget] = extractMarkdownLinks(markdown); + + expect(linkTarget).toBe("./targets/exists.md"); + await expect(validateLink(docsFile, linkTarget)).resolves.toBeNull();
}); + + it("discovers default markdown files and skips ignored directories", async () => { + const { collectMarkdownFiles } = await import("../scripts/ci/docs-check.js"); + const { root } = await createRepoFixture({ + "AGENTS.md": "# Instructions\n", + "README.md": "# Root\n", + "CODE_OF_CONDUCT.md": "# Code of Conduct\n", + "CONTRIBUTING.md": "# Contributing\n", + "SECURITY.md": "# Security\n", + "CHANGELOG.md": "# Changelog\n", + ".github/pull_request_template.md": "# PR Template\n", + ".github/workflows/ignored.md": "# Ignored workflow doc\n", + "config/README.md": "# Config\n", + "docs/guide.md": "# Guide\n", + "docs/sub/nested.markdown": "# Nested\n", + "test/AGENTS.md": "# Test instructions\n", + "notes/outside.md": "# Outside default dirs\n", + "tmp/ignored.md": "# Ignored temp\n", + "dist/ignored.md": "# Ignored dist\n", + "node_modules/pkg/ignored.md": "# Ignored dependency\n", + }); + + const discoveredFiles = await collectMarkdownFiles([], root); + const relativeDiscoveredFiles = discoveredFiles.map((filePath: string) => + path.relative(root, filePath).replace(/\\/g, "/"), + ); + + expect(relativeDiscoveredFiles).toEqual([ + ".github/pull_request_template.md", + "AGENTS.md", + "CHANGELOG.md", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.md", + "README.md", + "SECURITY.md", + "config/README.md", + "docs/guide.md", + "docs/sub/nested.markdown", + "test/AGENTS.md", + ]); + }); + + it("collects only explicitly requested markdown files or directories", async () => { + const { collectMarkdownFiles } = await import("../scripts/ci/docs-check.js"); + const { root } = await createRepoFixture({ + "README.md": "# Root\n", + "docs/guide.md": "# Guide\n", + "docs/sub/nested.markdown": "# Nested\n", + "notes/extra.md": "# Extra\n", + }); + + const explicitFile = await collectMarkdownFiles(["README.md"], root); + const explicitDirectory = await collectMarkdownFiles(["docs"], root); + + expect(explicitFile.map((filePath: string) => path.relative(root, filePath).replace(/\\/g, 
"/"))).toEqual(["README.md"]); + expect(explicitDirectory.map((filePath: string) => path.relative(root, filePath).replace(/\\/g, "/"))).toEqual([ + "docs/guide.md", + "docs/sub/nested.markdown", + ]); + }); + + it("silently skips missing explicit paths", async () => { + const { collectMarkdownFiles } = await import("../scripts/ci/docs-check.js"); + const { root } = await createRepoFixture({ + "docs/guide.md": "# Guide\n", + }); + + await expect(collectMarkdownFiles(["missing.md", "missing-dir"], root)).resolves.toEqual([]); + }); + + it("runs the direct docs-check pipeline for an explicit fixture path", async () => { + const { root } = await createRepoFixture({ + "docs/guide.md": "[Target](./targets/exists.md)\n", + "docs/targets/exists.md": "# Target\n", + }); + const scriptPath = path.resolve(process.cwd(), "scripts/ci/docs-check.js"); + const relativeFixtureRoot = path.relative(process.cwd(), root).replace(/\\/g, "/"); + + const { stdout, stderr } = await runDocsCheckSubprocess(scriptPath, [relativeFixtureRoot], { + cwd: process.cwd(), + timeout: DOCS_CHECK_SUBPROCESS_TIMEOUT_MS, + }); + + expect(stdout).toContain("docs-check: verified 2 markdown file(s)"); + expect(stderr).toBe(""); + }); + + it("exits cleanly when no markdown files are found", async () => { + const { root } = await createRepoFixture({}); + const scriptPath = path.resolve(process.cwd(), "scripts/ci/docs-check.js"); + const relativeFixtureRoot = path.relative(process.cwd(), root).replace(/\\/g, "/"); + + const { stdout, stderr } = await runDocsCheckSubprocess(scriptPath, [relativeFixtureRoot], { + cwd: process.cwd(), + timeout: DOCS_CHECK_SUBPROCESS_TIMEOUT_MS, + }); + + expect(stdout).toContain("docs-check: no markdown files found"); + expect(stderr).toBe(""); + }); + + it("runs the direct docs-check pipeline in default-scan mode", async () => { + const { root } = await createRepoFixture({ + "README.md": "# Root\n", + "docs/guide.md": "[Target](./targets/exists.md)\n", + 
"docs/targets/exists.md": "# Target\n", + }); + const scriptPath = path.resolve(process.cwd(), "scripts/ci/docs-check.js"); + + const { stdout, stderr } = await runDocsCheckSubprocess(scriptPath, [], { + cwd: root, + timeout: DOCS_CHECK_SUBPROCESS_TIMEOUT_MS, + }); + + expect(stdout).toContain("docs-check: verified 3 markdown file(s)"); + expect(stderr).toBe(""); + }); + + it("exits with an error when the direct docs-check pipeline finds broken links", async () => { + const { root } = await createRepoFixture({ + "docs/guide.md": "[Missing](./targets/missing.md)\n", + }); + const scriptPath = path.resolve(process.cwd(), "scripts/ci/docs-check.js"); + const relativeFixtureRoot = path.relative(process.cwd(), root).replace(/\\/g, "/"); + let failure: (Error & { code?: number; stderr?: string; stdout?: string }) | null = null; + + try { + await runDocsCheckSubprocess(scriptPath, [relativeFixtureRoot], { + cwd: process.cwd(), + timeout: DOCS_CHECK_SUBPROCESS_TIMEOUT_MS, + }); + } catch (error) { + if (error instanceof Error) { + failure = error as Error & { code?: number; stderr?: string; stdout?: string }; + } else { + throw error; + } + } + + expect(failure).not.toBeNull(); + expect(failure?.code).toBe(1); + expect(failure?.stderr).toContain("docs-check found broken documentation links:"); + expect(failure?.stderr).toContain("docs/guide.md: Missing local target: ./targets/missing.md (./targets/missing.md)"); + }); + + it("exits with an error when the direct docs-check pipeline finds a broken workflow badge", async () => { + const { root } = await createRepoFixture({ + "docs/guide.md": + "[CI](https://github.com/ndycode/oc-chatgpt-multi-auth/actions/workflows/does-not-exist.yml/badge.svg)\n", + }); + const scriptPath = path.resolve(process.cwd(), "scripts/ci/docs-check.js"); + const relativeFixtureRoot = path.relative(process.cwd(), root).replace(/\\/g, "/"); + let failure: (Error & { code?: number; stderr?: string; stdout?: string }) | null = null; + + try { + await 
runDocsCheckSubprocess(scriptPath, [relativeFixtureRoot], { + cwd: process.cwd(), + timeout: DOCS_CHECK_SUBPROCESS_TIMEOUT_MS, + }); + } catch (error) { + if (error instanceof Error) { + failure = error as Error & { code?: number; stderr?: string; stdout?: string }; + } else { + throw error; + } + } + + expect(failure).not.toBeNull(); + expect(failure?.code).toBe(1); + expect(failure?.stderr).toContain("docs-check found broken documentation links:"); + expect(failure?.stderr).toContain("Missing workflow referenced by GitHub Actions badge/link: does-not-exist.yml"); + }); +});