name: Skill Quality Report — Nightly Scan

on:
  schedule:
    - cron: "0 3 * * *" # 3:00 AM UTC daily
  workflow_dispatch: # allow manual trigger

permissions:
  contents: read
  discussions: write
  issues: write # fallback if Discussions are not enabled

jobs:
  nightly-scan:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
        with:
          fetch-depth: 0 # full history for git-log author fallback

      # ── Download & cache skill-validator ──────────────────────────
      # The cache key rolls daily so the nightly validator build is
      # re-downloaded at most once per day.
      - name: Get cache key date
        id: cache-date
        run: echo "date=$(date +%Y-%m-%d)" >> "$GITHUB_OUTPUT"

      - name: Restore skill-validator from cache
        id: cache-sv
        uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        with:
          path: .skill-validator
          key: skill-validator-linux-x64-${{ steps.cache-date.outputs.date }}
          restore-keys: |
            skill-validator-linux-x64-

      # cache-hit is 'true' only on an exact key match, so a stale
      # restore-keys hit still triggers a fresh download below.
      - name: Download skill-validator
        if: steps.cache-sv.outputs.cache-hit != 'true'
        run: |
          mkdir -p .skill-validator
          curl -fsSL \
            "https://github.com/dotnet/skills/releases/download/skill-validator-nightly/skill-validator-linux-x64.tar.gz" \
            -o .skill-validator/skill-validator-linux-x64.tar.gz
          tar -xzf .skill-validator/skill-validator-linux-x64.tar.gz -C .skill-validator
          rm .skill-validator/skill-validator-linux-x64.tar.gz
          chmod +x .skill-validator/skill-validator

      - name: Save skill-validator to cache
        if: steps.cache-sv.outputs.cache-hit != 'true'
        uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        with:
          path: .skill-validator
          key: skill-validator-linux-x64-${{ steps.cache-date.outputs.date }}

      # ── Run full scan ─────────────────────────────────────────────
      # Findings must not fail the job (this is a report, not a gate),
      # so errexit is suspended around the validator invocation and the
      # real exit code is captured from PIPESTATUS (tee masks it).
      - name: Run skill-validator check on all skills
        id: check-skills
        run: |
          set +e
          set -o pipefail
          .skill-validator/skill-validator check \
            --skills ./skills \
            --verbose \
            2>&1 | tee sv-skills-output.txt
          echo "exit_code=${PIPESTATUS[0]}" >> "$GITHUB_OUTPUT"
          set +o pipefail
          set -e

      - name: Run skill-validator check on all agents
        id: check-agents
        run: |
          set +e
          set -o pipefail
          AGENT_FILES=$(find agents -name '*.agent.md' -type f 2>/dev/null | tr '\n' ' ')
          if [ -n "$AGENT_FILES" ]; then
            # Intentionally unquoted: word-splitting expands the
            # space-separated file list into separate arguments.
            .skill-validator/skill-validator check \
              --agents $AGENT_FILES \
              --verbose \
              2>&1 | tee sv-agents-output.txt
            echo "exit_code=${PIPESTATUS[0]}" >> "$GITHUB_OUTPUT"
          else
            echo "No agent files found."
            echo "" > sv-agents-output.txt
            echo "exit_code=0" >> "$GITHUB_OUTPUT"
          fi
          set +o pipefail
          set -e

      # ── Build report with author attribution ──────────────────────
      - name: Build quality report
        id: report
        uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
        with:
          script: |
            const fs = require('fs');
            const path = require('path');
            const { execSync } = require('child_process');

            // ── Parse CODEOWNERS ──────────────────────────────────
            // Returns a Map of path-pattern -> ['@owner', ...].
            // Missing/unreadable CODEOWNERS degrades to an empty map.
            function parseCodeowners() {
              const map = new Map();
              try {
                const raw = fs.readFileSync('CODEOWNERS', 'utf8');
                for (const line of raw.split('\n')) {
                  const trimmed = line.trim();
                  if (!trimmed || trimmed.startsWith('#')) continue;
                  const parts = trimmed.split(/\s+/);
                  if (parts.length >= 2) {
                    const filePath = parts[0].replace(/^\//, '').replace(/\/$/, '');
                    const owners = parts.slice(1).filter(p => p.startsWith('@'));
                    if (owners.length > 0) {
                      map.set(filePath, owners);
                    }
                  }
                }
              } catch (e) {
                console.log('Could not parse CODEOWNERS:', e.message);
              }
              return map;
            }

            // ── Resolve author for a path ─────────────────────────
            function resolveAuthor(resourcePath, codeowners) {
              // CODEOWNERS semantics: last matching rule wins.
              // Also treat "*" as a match-all default rule.
              let matchedOwners = null;
              for (const [pattern, owners] of codeowners) {
                if (
                  pattern === '*' ||
                  resourcePath === pattern ||
                  resourcePath.startsWith(pattern + '/')
                ) {
                  matchedOwners = owners;
                }
              }
              if (matchedOwners && matchedOwners.length > 0) {
                return matchedOwners.join(', ');
              }
              // Fallback: most recent git-log author for the path
              // (requires the fetch-depth: 0 checkout above).
              try {
                const author = execSync(
                  `git log --format='%aN' --follow -1 -- "${resourcePath}"`,
                  { encoding: 'utf8' }
                ).trim();
                return author || 'unknown';
              } catch {
                return 'unknown';
              }
            }

            // ── Parse skill-validator output ──────────────────────
            // The output is a text report; we preserve it as-is and
            // augment it with author info in the summary.
            const skillsOutput = fs.readFileSync('sv-skills-output.txt', 'utf8').trim();
            const agentsOutput = fs.existsSync('sv-agents-output.txt')
              ? fs.readFileSync('sv-agents-output.txt', 'utf8').trim()
              : '';
            const codeowners = parseCodeowners();

            // Count findings
            const combined = skillsOutput + '\n' + agentsOutput;
            const errorCount = (combined.match(/\bError\b/gi) || []).length;
            const warningCount = (combined.match(/\bWarning\b/gi) || []).length;
            const advisoryCount = (combined.match(/\bAdvisory\b/gi) || []).length;

            // Count total skills & agents checked
            let skillDirs = [];
            try {
              skillDirs = fs.readdirSync('skills', { withFileTypes: true })
                .filter(d => d.isDirectory())
                .map(d => d.name);
            } catch {}
            let agentFiles = [];
            try {
              agentFiles = fs.readdirSync('agents')
                .filter(f => f.endsWith('.agent.md'));
            } catch {}

            // ── Build author-attributed summary ───────────────────
            // Extract per-resource blocks from output. The validator
            // prints skill names as headers — we annotate them with
            // the resolved owner.
            function annotateWithAuthors(output, kind) {
              if (!output) return '_No findings._';
              const lines = output.split('\n');
              const annotated = [];
              for (const line of lines) {
                // Skill names appear as headers, e.g.
                // "## skill-name" or "skill-name:"
                const headerMatch = line.match(/^(?:#{1,3}\s+)?([a-z0-9][a-z0-9-]+(?:\.[a-z0-9.-]+)?)\b/);
                if (headerMatch) {
                  const name = headerMatch[1];
                  const resourcePath = kind === 'skill'
                    ? `skills/${name}`
                    : `agents/${name}.agent.md`;
                  const author = resolveAuthor(resourcePath, codeowners);
                  annotated.push(`${line} — ${author}`);
                } else {
                  annotated.push(line);
                }
              }
              return annotated.join('\n');
            }

            const today = new Date().toISOString().split('T')[0];
            const title = `Skill Quality Report — ${today}`;
            const body = [
              `# ${title}`,
              '',
              `**${skillDirs.length} skills** and **${agentFiles.length} agents** scanned.`,
              '',
              `| Severity | Count |`,
              `|----------|-------|`,
              `| ⛔ Errors | ${errorCount} |`,
              `| ⚠️ Warnings | ${warningCount} |`,
              `| ℹ️ Advisories | ${advisoryCount} |`,
              '',
              '---',
              '',
              '## Skills',
              '',
              '<details>',
              '<summary>Full skill-validator output for skills</summary>',
              '',
              '```',
              annotateWithAuthors(skillsOutput, 'skill'),
              '```',
              '',
              '</details>',
              '',
              '## Agents',
              '',
              '<details>',
              '<summary>Full skill-validator output for agents</summary>',
              '',
              '```',
              annotateWithAuthors(agentsOutput, 'agent'),
              '```',
              '',
              '</details>',
              '',
              '---',
              '',
              `_Generated by the [Skill Validator nightly scan](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/workflows/skill-quality-report.yml)._`,
            ].join('\n');

            core.setOutput('title', title);
            core.setOutput('body_file', 'report-body.md');

            // GitHub Issues/Discussions enforce a body size limit on the
            // UTF-8 payload (~65536 bytes). Use byte-based limits and prefer
            // shrinking verbose <details> sections to keep markdown valid.
            const MAX_BODY_BYTES = 65000; // leave some margin

            // Replace each <details>…</details> section with a short
            // placeholder, preserving any attributes on the opening tag.
            function shrinkDetailsSections(markdown) {
              return markdown.replace(
                /<details([^>]*)>[\s\S]*?<\/details>/g,
                (match, attrs) => {
                  const placeholder =
                    `<details${attrs}>\n<summary>Details truncated</summary>\n\n` +
                    "> Full output was truncated to fit GitHub's body size limit. " +
                    'See the workflow run for complete output.\n';
                  return `${placeholder}</details>`;
                }
              );
            }

            function trimToByteLimit(str, maxBytes) {
              const buf = Buffer.from(str, 'utf8');
              if (buf.length <= maxBytes) return str;
              // Slice bytes and decode, which safely handles multi-byte chars
              return buf.slice(0, maxBytes).toString('utf8').replace(/\uFFFD$/, '');
            }

            const truncNote =
              '\n\n> **Note:** Output was truncated to fit GitHub\'s body size limit. ' +
              'See the [workflow run](https://github.com/' +
              context.repo.owner + '/' + context.repo.repo +
              '/actions/workflows/skill-quality-report.yml) for full output.\n';
            const truncNoteBytes = Buffer.byteLength(truncNote, 'utf8');

            let finalBody = body;
            if (Buffer.byteLength(finalBody, 'utf8') > MAX_BODY_BYTES) {
              // First try: collapse <details> sections to reduce size
              finalBody = shrinkDetailsSections(finalBody);
            }
            if (Buffer.byteLength(finalBody, 'utf8') > MAX_BODY_BYTES) {
              // Last resort: hard byte-trim + truncation note
              finalBody = trimToByteLimit(finalBody, MAX_BODY_BYTES - truncNoteBytes);
            }
            if (Buffer.byteLength(finalBody, 'utf8') < Buffer.byteLength(body, 'utf8')) {
              finalBody += truncNote;
            }

            fs.writeFileSync('report-body.md', finalBody);

      # ── Create Discussion (preferred) or Issue (fallback) ─────────
      - name: Create Discussion
        id: create-discussion
        continue-on-error: true
        uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7.1.0
        env:
          # Passed via env rather than interpolated into the script
          # source, so quotes/backticks in the title cannot break or
          # inject into the JavaScript.
          REPORT_TITLE: ${{ steps.report.outputs.title }}
        with:
          script: |
            const fs = require('fs');
            const title = process.env.REPORT_TITLE;
            const body = fs.readFileSync('report-body.md', 'utf8');

            // Find the "Skill Quality Reports" category
            const categoriesResult = await github.graphql(`
              query($owner: String!, $repo: String!) {
                repository(owner: $owner, name: $repo) {
                  id
                  discussionCategories(first: 25) {
                    nodes { id name }
                  }
                }
              }
            `, {
              owner: context.repo.owner,
              repo: context.repo.repo,
            });

            const repo = categoriesResult.repository;
            const categories = repo.discussionCategories.nodes;
            const category = categories.find(c =>
              c.name === 'Skill Quality Reports'
            );
            if (!category) {
              // setFailed + continue-on-error marks the step outcome as
              // 'failure', which triggers the issue fallback below.
              core.setFailed('Discussion category "Skill Quality Reports" not found. Falling back to issue.');
              return;
            }

            await github.graphql(`
              mutation($repoId: ID!, $categoryId: ID!, $title: String!, $body: String!) {
                createDiscussion(input: {
                  repositoryId: $repoId,
                  categoryId: $categoryId,
                  title: $title,
                  body: $body
                }) {
                  discussion { url }
                }
              }
            `, {
              repoId: repo.id,
              categoryId: category.id,
              title: title,
              body: body,
            });
            console.log('Discussion created successfully.');

      - name: Fallback — Create Issue
        if: steps.create-discussion.outcome == 'failure'
        env:
          GH_TOKEN: ${{ github.token }}
          # Env indirection avoids shell injection via the title output.
          REPORT_TITLE: ${{ steps.report.outputs.title }}
        run: |
          # Create label if it doesn't exist (ignore errors if it already exists)
          gh label create "skill-quality" --description "Automated skill quality reports" --color "d4c5f9" 2>/dev/null || true
          gh issue create \
            --title "$REPORT_TITLE" \
            --body-file report-body.md \
            --label "skill-quality"