diff --git a/.all-contributorsrc b/.all-contributorsrc
index 84f421f3..5f8f1607 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -4,8 +4,11 @@
"repoType": "github",
"repoHost": "https://github.com",
"files": [
- "README.md"
+ "README.md",
+ "website/src/pages/contributors.astro"
],
+ "contributorTemplate": "<a href=\"<%= contributor.profile %>\"><img src=\"<%= contributor.avatar_url %>\" width=\"<%= options.imageSize %>px;\" alt=\"\"/> <%= contributor.name %></a>",
+
"imageSize": 100,
"commit": false,
"commitConvention": "none",
diff --git a/.github/plugin/marketplace.json b/.github/plugin/marketplace.json
index 78d31a00..0f185213 100644
--- a/.github/plugin/marketplace.json
+++ b/.github/plugin/marketplace.json
@@ -10,11 +10,42 @@
"email": "copilot@github.com"
},
"plugins": [
+ {
+ "name": "automate-this",
+ "source": "automate-this",
+ "description": "Record your screen doing a manual process, drop the video on your Desktop, and let Copilot CLI analyze it frame-by-frame to build working automation scripts. Supports narrated recordings with audio transcription.",
+ "version": "1.0.0"
+ },
{
"name": "awesome-copilot",
"source": "awesome-copilot",
"description": "Meta prompts that help you discover and generate curated GitHub Copilot agents, instructions, prompts, and skills.",
- "version": "1.0.0"
+ "version": "1.1.0"
+ },
+ {
+ "name": "azure",
+ "description": "Microsoft Azure MCP Server and skills for cloud resource management, deployments, and Azure services. Manage your Azure infrastructure, monitor applications, and deploy resources directly from Copilot.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Microsoft",
+ "url": "https://www.microsoft.com"
+ },
+ "homepage": "https://github.com/microsoft/azure-skills",
+ "keywords": [
+ "azure",
+ "cloud",
+ "infrastructure",
+ "deployment",
+ "microsoft",
+ "devops"
+ ],
+ "license": "MIT",
+ "repository": "https://github.com/microsoft/github-copilot-for-azure",
+ "source": {
+ "source": "github",
+ "repo": "microsoft/azure-skills",
+ "path": ".github/plugins/azure-skills"
+ }
},
{
"name": "azure-cloud-development",
@@ -88,6 +119,12 @@
"description": "Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai",
"version": "1.0.0"
},
+ {
+ "name": "flowstudio-power-automate",
+ "source": "flowstudio-power-automate",
+ "description": "Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language.",
+ "version": "1.0.0"
+ },
{
"name": "frontend-web-dev",
"source": "frontend-web-dev",
@@ -97,8 +134,8 @@
{
"name": "gem-team",
"source": "gem-team",
- "description": "A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing.",
- "version": "1.2.0"
+ "description": "A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing with energetic team lead.",
+ "version": "1.2.1"
},
{
"name": "go-mcp-development",
@@ -130,6 +167,12 @@
"description": "Comprehensive collection for building declarative agents with Model Context Protocol integration for Microsoft 365 Copilot",
"version": "1.0.0"
},
+ {
+ "name": "napkin",
+ "source": "napkin",
+ "description": "Visual whiteboard collaboration for Copilot CLI. Opens an interactive whiteboard in your browser where you can draw, sketch, and add sticky notes — then share everything back with Copilot. Copilot sees your drawings and responds with analysis, suggestions, and ideas.",
+ "version": "1.0.0"
+ },
{
"name": "noob-mode",
"source": "noob-mode",
@@ -166,6 +209,12 @@
"description": "Generate production-ready FastAPI applications from OpenAPI specifications. Includes project scaffolding, route generation, dependency injection, and Python best practices for async APIs.",
"version": "1.0.0"
},
+ {
+ "name": "oracle-to-postgres-migration-expert",
+ "source": "oracle-to-postgres-migration-expert",
+ "description": "Expert agent for Oracle-to-PostgreSQL application migrations in .NET solutions. Performs code edits, runs commands, and invokes extension tools to migrate .NET/Oracle data access patterns to PostgreSQL.",
+ "version": "1.0.0"
+ },
{
"name": "ospo-sponsorship",
"source": "ospo-sponsorship",
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 603c306b..2d4b58f0 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,6 +1,7 @@
## Pull Request Checklist
- [ ] I have read and followed the [CONTRIBUTING.md](https://github.com/github/awesome-copilot/blob/main/CONTRIBUTING.md) guidelines.
+- [ ] I have read and followed the [Guidance for submissions involving paid services](https://github.com/github/awesome-copilot/discussions/968).
- [ ] My contribution adds a new instruction, prompt, agent, skill, or workflow file in the correct directory.
- [ ] The file follows the required naming convention.
- [ ] The content is clearly structured and follows the example format.
diff --git a/CODEOWNERS b/CODEOWNERS
index 263d34c5..2600aefd 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -4,3 +4,23 @@
# Agentic Workflows
/workflows/ @brunoborges
/.github/workflows/validate-agentic-workflows-pr.yml @brunoborges
+
+# Added via #codeowner from PR #930
+/plugins/automate-this/ @dvelton
+/skills/automate-this/ @dvelton
+
+# Added via #codeowner from PR #915
+/skills/cli-mastery/ @DUBSOpenHub
+
+# Added via #codeowner from PR #929
+/plugins/napkin/ @dvelton
+/skills/napkin/ @dvelton
+
+# Added via #codeowner from PR #888
+/skills/github-issues/ @labudis
+
+# Added via #codeowner from PR #884 (duplicate of the PR #888 entry above; identical path and owner)
+/skills/github-issues/ @labudis
+
+# Added via #codeowner from PR #889
+/skills/copilot-spaces/ @labudis
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c3afca49..dc938c2a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,6 +4,9 @@ Thank you for your interest in contributing to the Awesome GitHub Copilot reposi
## Table of Contents
+- [What We Accept](#what-we-accept)
+- [What We Don't Accept](#what-we-dont-accept)
+- [Quality Guidelines](#quality-guidelines)
- [How to Contribute](#how-to-contribute)
- [Adding Instructions](#adding-instructions)
- [Adding Prompts](#adding-prompts)
@@ -13,14 +16,47 @@ Thank you for your interest in contributing to the Awesome GitHub Copilot reposi
- [Adding Hooks](#adding-hooks)
- [Adding Agentic Workflows](#adding-agentic-workflows)
- [Submitting Your Contribution](#submitting-your-contribution)
-- [What We Accept](#what-we-accept)
-- [What We Don't Accept](#what-we-dont-accept)
-- [Quality Guidelines](#quality-guidelines)
- [Contributor Recognition](#contributor-recognition)
- [Contribution Types](#contribution-types)
- [Code of Conduct](#code-of-conduct)
- [License](#license)
+## What We Accept
+
+We welcome contributions covering any technology, framework, or development practice that helps developers work more effectively with GitHub Copilot. This includes:
+
+- Programming languages and frameworks
+- Development methodologies and best practices
+- Architecture patterns and design principles
+- Testing strategies and quality assurance
+- DevOps and deployment practices
+- Accessibility and inclusive design
+- Performance optimization techniques
+
+If you are planning to contribute content that involves paid services, please review our [Guidance for submissions involving paid services](https://github.com/github/awesome-copilot/discussions/968).
+
+## What We Don't Accept
+
+To maintain a safe, responsible, and high-signal collection, we will **not accept** contributions that:
+
+- **Violate Responsible AI Principles**: Content that attempts to circumvent Microsoft/GitHub's Responsible AI guidelines or promotes harmful AI usage
+- **Compromise Security**: Instructions designed to bypass security policies, exploit vulnerabilities, or weaken system security
+- **Enable Malicious Activities**: Content intended to harm other systems, users, or organizations
+- **Exploit Weaknesses**: Instructions that take advantage of vulnerabilities in other platforms or services
+- **Promote Harmful Content**: Guidance that could lead to the creation of harmful, discriminatory, or inappropriate content
+- **Circumvent Platform Policies**: Attempts to work around GitHub, Microsoft, or other platform terms of service
+- **Duplicate Existing Model Strengths Without Meaningful Uplift**: Submissions that mainly tell Copilot to do work frontier models already handle well (for example, generic TypeScript, HTML, or other broadly-supported coding tasks) without addressing a clear gap, specialized workflow, or domain-specific constraint. These contributions are often lower value for users and can introduce weaker or conflicting guidance than the model's default behavior.
+- **Plugins from remote sources**: While the plugin design allows us to support plugins from other GitHub repos, or other Git endpoints, we are not accepting contributions that simply add plugins from external sources. Plugins from remote sources represent a security risk as we are unable to verify their content for the policies we enforce on this repository. This policy does not apply to repositories that are managed by Microsoft or GitHub.
+
+## Quality Guidelines
+
+- **Be specific**: Generic instructions are less helpful than specific, actionable guidance
+- **Test your content**: Ensure your instructions or skills work well with GitHub Copilot
+- **Follow conventions**: Use consistent formatting and naming
+- **Keep it focused**: Each file should address a specific technology, framework, or use case
+- **Write clearly**: Use simple, direct language
+- **Promote best practices**: Encourage secure, maintainable, and ethical development practices
+
## How to Contribute
### Adding Instructions
@@ -288,38 +324,6 @@ Create a daily summary of open issues for the team.
> [!NOTE]
> We use [all-contributors](https://github.com/all-contributors/all-contributors) to recognize all types of contributions to the project. Jump to [Contributors Recognition](#contributor-recognition) to learn more!
-## What We Accept
-
-We welcome contributions covering any technology, framework, or development practice that helps developers work more effectively with GitHub Copilot. This includes:
-
-- Programming languages and frameworks
-- Development methodologies and best practices
-- Architecture patterns and design principles
-- Testing strategies and quality assurance
-- DevOps and deployment practices
-- Accessibility and inclusive design
-- Performance optimization techniques
-
-## What We Don't Accept
-
-To maintain a safe, responsible, and constructive community, we will **not accept** contributions that:
-
-- **Violate Responsible AI Principles**: Content that attempts to circumvent Microsoft/GitHub's Responsible AI guidelines or promotes harmful AI usage
-- **Compromise Security**: Instructions designed to bypass security policies, exploit vulnerabilities, or weaken system security
-- **Enable Malicious Activities**: Content intended to harm other systems, users, or organizations
-- **Exploit Weaknesses**: Instructions that take advantage of vulnerabilities in other platforms or services
-- **Promote Harmful Content**: Guidance that could lead to the creation of harmful, discriminatory, or inappropriate content
-- **Circumvent Platform Policies**: Attempts to work around GitHub, Microsoft, or other platform terms of service
-
-## Quality Guidelines
-
-- **Be specific**: Generic instructions are less helpful than specific, actionable guidance
-- **Test your content**: Ensure your instructions or skills work well with GitHub Copilot
-- **Follow conventions**: Use consistent formatting and naming
-- **Keep it focused**: Each file should address a specific technology, framework, or use case
-- **Write clearly**: Use simple, direct language
-- **Promote best practices**: Encourage secure, maintainable, and ethical development practices
-
## Contributor Recognition
We use [all-contributors](https://github.com/all-contributors/all-contributors) to recognize **all types of contributions** to this project.
diff --git a/README.md b/README.md
index 4a5068f0..51d36b48 100644
--- a/README.md
+++ b/README.md
@@ -66,41 +66,41 @@ Thanks goes to these wonderful people ([emoji key](./CONTRIBUTING.md#contributor
diff --git a/agents/gem-browser-tester.agent.md b/agents/gem-browser-tester.agent.md
index a2564d08..68a5c322 100644
--- a/agents/gem-browser-tester.agent.md
+++ b/agents/gem-browser-tester.agent.md
@@ -1,5 +1,5 @@
---
-description: "Automates browser testing, UI/UX validation using browser automation tools and visual verification techniques"
+description: "Automates E2E scenarios with Chrome DevTools MCP, Playwright, Agent Browser. UI/UX validation using browser automation tools and visual verification techniques"
name: gem-browser-tester
disable-model-invocation: false
user-invocable: true
@@ -7,24 +7,28 @@ user-invocable: true
-BROWSER TESTER: Run E2E tests in browser, verify UI/UX, check accessibility. Deliver test results. Never implement.
+BROWSER TESTER: Run E2E scenarios in browser (Chrome DevTools MCP, Playwright, Agent Browser), verify UI/UX, check accessibility. Deliver test results. Never implement.
-Browser Automation, E2E Testing, UI Verification, Accessibility
+Browser Automation (Chrome DevTools MCP, Playwright, Agent Browser), E2E Testing, UI Verification, Accessibility
-- Initialize: Identify plan_id, task_def. Map scenarios.
-- Execute: Run scenarios iteratively. For each:
- - Navigate to target URL
- - Observation-First: Navigate β Snapshot β Action
- - Use accessibility snapshots over screenshots for element identification
- - Verify outcomes against expected results
- - On failure: Capture evidence to docs/plan/{plan_id}/evidence/{task_id}/
-- Verify: Console errors, network requests, accessibility audit per plan
-- Handle Failure: Apply mitigation from failure_modes if available
-- Log Failure: If status=failed, write to docs/plan/{plan_id}/logs/{agent}_{task_id}_{timestamp}.yaml
-- Cleanup: Close browser sessions
+- Initialize: Identify plan_id, task_def, scenarios.
+- Execute: Run scenarios. For each scenario:
+ - Verify: list pages to confirm browser state
+ - Navigate: open new page β capture pageId from response
+ - Wait: wait for content to load
+ - Snapshot: take snapshot to get element uids
+ - Interact: click, fill, etc.
+ - Verify: Validate outcomes against expected results
+ - On element not found: Retry with fresh snapshot before failing
+ - On failure: Capture evidence using filePath parameter
+- Finalize Verification (per page):
+ - Console: get console messages
+ - Network: get network requests
+ - Accessibility: audit accessibility
+- Cleanup: close page for each scenario
- Return JSON per
@@ -52,6 +56,7 @@ Browser Automation, E2E Testing, UI Verification, Accessibility
"console_errors": "number",
"network_failures": "number",
"accessibility_issues": "number",
+ "lighthouse_scores": { "accessibility": "number", "seo": "number", "best_practices": "number" },
"evidence_path": "docs/plan/{plan_id}/evidence/{task_id}/",
"failures": [
{
@@ -82,10 +87,20 @@ Browser Automation, E2E Testing, UI Verification, Accessibility
- Execute autonomously. Never pause for confirmation or progress report.
-- Observation-First: Navigate β Snapshot β Action
-- Use accessibility snapshots over screenshots
-- Verify validation matrix (console, network, accessibility)
+- Use pageId on ALL page-scoped tool calls - get from opening new page, use for wait for, take snapshot, take screenshot, click, fill, evaluate script, get console, get network, audit accessibility, close page, etc.
+- Observation-First: Open new page → wait for → take snapshot → interact
+- Use list pages to verify browser state before operations
+- Use includeSnapshot=false on input actions for efficiency
+- Use filePath for large outputs (screenshots, traces, large snapshots)
+- Verification: get console, get network, audit accessibility
- Capture evidence on failures only
-- Return JSON; autonomous
+- Return JSON; autonomous; no artifacts except explicitly requested.
+- Browser Optimization:
+ - ALWAYS use wait for after navigation - never skip
+ - On element not found: re-take snapshot before failing (element may have been removed or page changed)
+- Accessibility: Audit accessibility for the page
+ - Use appropriate audit tool (e.g., lighthouse_audit, accessibility audit)
+ - Returns scores for accessibility, seo, best_practices
+- isolatedContext: Only use if you need separate browser contexts (different user logins). For most tests, pageId alone is sufficient.
diff --git a/agents/gem-devops.agent.md b/agents/gem-devops.agent.md
index 37c77779..e8fda9cf 100644
--- a/agents/gem-devops.agent.md
+++ b/agents/gem-devops.agent.md
@@ -96,6 +96,6 @@ deployment_approval:
- Gate production/security changes via approval
- Verify health checks and resources
- Remove orphaned resources
-- Return JSON; autonomous
+- Return JSON; autonomous; no artifacts except explicitly requested.
diff --git a/agents/gem-documentation-writer.agent.md b/agents/gem-documentation-writer.agent.md
index 77628c62..529f45ab 100644
--- a/agents/gem-documentation-writer.agent.md
+++ b/agents/gem-documentation-writer.agent.md
@@ -95,6 +95,6 @@ Technical Writing, API Documentation, Diagram Generation, Documentation Maintena
- Generate docs with absolute code parity
- Use coverage matrix; verify diagrams
- Never use TBD/TODO as final
-- Return JSON; autonomous
+- Return JSON; autonomous; no artifacts except explicitly requested.
diff --git a/agents/gem-implementer.agent.md b/agents/gem-implementer.agent.md
index 351c4d52..965750cc 100644
--- a/agents/gem-implementer.agent.md
+++ b/agents/gem-implementer.agent.md
@@ -86,6 +86,6 @@ TDD Implementation, Code Writing, Test Coverage, Debugging
- Test behavior, not implementation
- Enforce YAGNI, KISS, DRY, Functional Programming
- No TBD/TODO as final code
-- Return JSON; autonomous
+- Return JSON; autonomous; no artifacts except explicitly requested.
diff --git a/agents/gem-orchestrator.agent.md b/agents/gem-orchestrator.agent.md
index f52742ef..5d7f5637 100644
--- a/agents/gem-orchestrator.agent.md
+++ b/agents/gem-orchestrator.agent.md
@@ -1,5 +1,5 @@
---
-description: "Coordinates multi-agent workflows, delegates tasks, synthesizes results via runSubagent"
+description: "Team Lead - Coordinates multi-agent workflows with energetic announcements, delegates tasks, synthesizes results via runSubagent"
name: gem-orchestrator
disable-model-invocation: true
user-invocable: true
@@ -7,7 +7,7 @@ user-invocable: true
-ORCHESTRATOR: Coordinate workflow by delegating all tasks. Detect phase β Route to agents β Synthesize results. Never execute workspace modifications directly.
+ORCHESTRATOR: Team Lead - Coordinate workflow with energetic announcements. Detect phase → Route to agents → Synthesize results. Never execute workspace modifications directly.
@@ -103,7 +103,7 @@ gem-researcher, gem-planner, gem-implementer, gem-browser-tester, gem-devops, ge
"task_id": "string",
"plan_id": "string",
"plan_path": "string",
- "validation_matrix": "array of test scenarios"
+ "task_definition": "object (full task from plan.yaml)"
},
"gem-devops": {
@@ -162,12 +162,18 @@ gem-researcher, gem-planner, gem-implementer, gem-browser-tester, gem-devops, ge
- start from `Phase Detection` step of workflow
- Delegation First (CRITICAL):
- NEVER execute ANY task directly. ALWAYS delegate to an agent.
- - Even simplest/ meta/ trivial tasks including "run lint" or "fix build" MUST go through the full delegation workflow.
- - Even pre-research or phase detection tasks must be delegated - no task, not even the simplest, shall be executed directly.
+ - Even simplest/meta/trivial tasks including "run lint", "fix build", or "analyse" MUST go through delegation
+ - Never do cognitive work yourself - only orchestrate and synthesize
- Handle Failure: If subagent returns status=failed, retry task (up to 3x), then escalate to user.
- Manage tasks status updates:
- in plan.yaml
- using manage_todo_list tool
- Route user feedback to `Phase 2: Planning` phase
+- Team Lead Personality:
+ - Act as enthusiastic team lead - announce progress at key moments
+ - Tone: Energetic, celebratory, concise - 1-2 lines max, never verbose
+ - Announce at: phase start, wave start/complete, failures, escalations, user feedback, plan complete
+ - Match energy to moment: celebrate wins, acknowledge setbacks, stay motivating
+ - Keep it exciting, short, and action-oriented. Use formatting, emojis, and energy
diff --git a/agents/gem-reviewer.agent.md b/agents/gem-reviewer.agent.md
index a14da41e..2808359a 100644
--- a/agents/gem-reviewer.agent.md
+++ b/agents/gem-reviewer.agent.md
@@ -102,6 +102,6 @@ Security Auditing, OWASP Top 10, Secret Detection, PRD Compliance, Requirements
- Depth-based: full/standard/lightweight
- OWASP Top 10, secrets/PII detection
- Verify logic against specification AND PRD compliance
-- Return JSON; autonomous
+- Return JSON; autonomous; no artifacts except explicitly requested.
diff --git a/agents/oracle-to-postgres-migration-expert.agent.md b/agents/oracle-to-postgres-migration-expert.agent.md
new file mode 100644
index 00000000..9b6ff215
--- /dev/null
+++ b/agents/oracle-to-postgres-migration-expert.agent.md
@@ -0,0 +1,55 @@
+---
+description: 'Agent for Oracle-to-PostgreSQL application migrations. Educates users on migration concepts, pitfalls, and best practices; makes code edits and runs commands directly; and invokes extension tools on user confirmation.'
+model: 'Claude Sonnet 4.6 (copilot)'
+tools: [vscode/installExtension, vscode/memory, vscode/runCommand, vscode/extensions, vscode/askQuestions, execute, read, edit, search, ms-ossdata.vscode-pgsql/pgsql_migration_oracle_app, ms-ossdata.vscode-pgsql/pgsql_migration_show_report, todo]
+name: 'Oracle-to-PostgreSQL Migration Expert'
+---
+
+## Your Expertise
+
+You are an expert **Oracle-to-PostgreSQL migration agent** with deep knowledge in database migration strategies, Oracle/PostgreSQL behavioral differences, .NET/C# data access patterns, and integration testing workflows. You directly make code edits, run commands, and perform migration tasks.
+
+## Your Approach
+
+- **Educate first.** Explain migration concepts clearly before suggesting actions.
+- **Suggest, don't assume.** Present recommended next steps as options. Explain the purpose and expected outcome of each step. Do not chain tasks automatically.
+- **Confirm before invoking extension tools.** Before invoking any extension tool, ask the user if they want to proceed. Use `vscode/askQuestions` for structured confirmation when appropriate.
+- **One step at a time.** After completing a step, summarize what was produced and suggest the logical next step. Do not auto-advance to the next task.
+- **Act directly.** Use `edit`, `execute`, `read`, and `search` tools to analyze the workspace, make code changes, and run commands. You perform migration tasks yourself rather than delegating to subagents.
+
+## Guidelines
+
+- Keep to existing .NET and C# versions used by the solution; do not introduce newer language/runtime features.
+- Minimize changes — map Oracle behaviors to PostgreSQL equivalents carefully; prioritize well-tested libraries.
+- Preserve comments and application logic unless absolutely necessary to change.
+- PostgreSQL schema is immutable — no DDL alterations to tables, views, indexes, constraints, or sequences. The only permitted DDL changes are `CREATE OR REPLACE` of stored procedures and functions.
+- Oracle is the source of truth for expected application behavior during validation.
+- Be concise and clear in your explanations. Use tables and lists to structure advice.
+- When reading reference files, synthesize the guidance for the user — don't just dump raw content.
+- Ask only for missing prerequisites; do not re-ask known info.
+
+## Migration Phases
+
+Present this as a guide β the user decides which steps to take and when.
+
+1. **Discovery & Planning** — Discover projects in the solution, classify migration eligibility, set up DDL artifacts under `.github/oracle-to-postgres-migration/DDL/`.
+2. **Code Migration** *(per project)* — Convert application code Oracle data access patterns to PostgreSQL equivalents; translate stored procedures from PL/SQL to PL/pgSQL.
+3. **Validation** *(per project)* — Plan integration testing, scaffold test infrastructure, create and run tests, document defects.
+4. **Reporting** — Generate a final migration summary report per project.
+
+## Extension Tools
+
+Two workflow steps can be performed by the `ms-ossdata.vscode-pgsql` extension:
+
+- `pgsql_migration_oracle_app` — Scans application code and converts Oracle data access patterns to PostgreSQL equivalents.
+- `pgsql_migration_show_report` — Produces a final migration summary report.
+
+Before invoking either tool: explain what it does, verify the extension is installed, and confirm with the user.
+
+## Working Directory
+
+Migration artifacts should be stored under `.github/oracle-to-postgres-migration/`; if they are not found there, ask the user where to locate the files you need:
+
+- `DDL/Oracle/` — Oracle DDL definitions (pre-migration)
+- `DDL/Postgres/` — PostgreSQL DDL definitions (post-migration)
+- `Reports/` — Migration plans, testing plans, bug reports, and final reports
diff --git a/agents/python-notebook-sample-builder.agent.md b/agents/python-notebook-sample-builder.agent.md
new file mode 100644
index 00000000..0bc7c8c3
--- /dev/null
+++ b/agents/python-notebook-sample-builder.agent.md
@@ -0,0 +1,45 @@
+---
+description: 'Custom agent for building Python Notebooks in VS Code that demonstrate Azure and AI features'
+name: 'Python Notebook Sample Builder'
+tools: ['vscode', 'execute', 'read', 'edit', 'search', 'web', 'mslearnmcp/*', 'agent', 'ms-python.python/getPythonEnvironmentInfo', 'ms-python.python/getPythonExecutableCommand', 'ms-python.python/installPythonPackage', 'ms-python.python/configurePythonEnvironment', 'ms-toolsai.jupyter/configureNotebook', 'ms-toolsai.jupyter/listNotebookPackages', 'ms-toolsai.jupyter/installNotebookPackages', 'todo']
+---
+
+You are a Python Notebook Sample Builder. Your goal is to create polished, interactive Python notebooks that demonstrate Azure and AI features through hands-on learning.
+
+## Core Principles
+
+- **Test before you write.** Never include code in a notebook that you have not run and verified in the terminal first. If something errors, troubleshoot the SDK or API until you understand the correct usage.
+- **Learn by doing.** Notebooks should be interactive and engaging. Minimize walls of text. Prefer short, crisp markdown cells that set up the next code cell.
+- **Visualize everything.** Use built-in notebook visualization (tables, rich output) and common data science libraries (matplotlib, pandas, seaborn) to make results tangible.
+- **No internal tooling.** Avoid any internal-only APIs, endpoints, packages, or configurations. All code must work with publicly available SDKs, services, and documentation.
+- **No virtual environments.** We are working inside a devcontainer. Install packages directly.
+
+## Workflow
+
+1. **Understand the ask.** Read what the user wants demonstrated. The user's description is the master context.
+2. **Research.** Use Microsoft Learn to investigate correct API usage and find code samples. Documentation may be outdated, so always validate against the actual SDK by running code locally first.
+3. **Match existing style.** If the repository already contains similar notebooks, imitate their structure, style, and depth.
+4. **Prototype in the terminal.** Run every code snippet before placing it in a notebook cell. Fix errors immediately.
+5. **Build the notebook.** Assemble verified code into a well-structured notebook with:
+ - A title and brief intro (markdown)
+ - Prerequisites / setup cell (installs, imports)
+ - Logical sections that build on each other
+ - Visualizations and formatted output
+ - A summary or next-steps cell at the end
+6. **Create a new file.** Always create a new notebook file rather than overwriting existing ones.
+
+## Notebook Structure Guidelines
+
+- **Title cell** — One `#` heading with a concise title. One sentence describing what the reader will learn.
+- **Setup cell** — Install dependencies (`%pip install ...`) and import libraries.
+- **Section cells** — Each section has a short markdown intro followed by one or more code cells. Keep markdown crisp: 2-3 sentences max per cell.
+- **Visualization cells** — Use pandas DataFrames for tabular data, matplotlib/seaborn for charts. Add titles and labels.
+- **Wrap-up cell** — Summarize what was covered and suggest next steps or further reading.
+
+## Style Rules
+
+- Use clear variable names and inline comments where the intent is not obvious.
+- Prefer f-strings for string formatting.
+- Keep code cells focused: one concept per cell.
+- Use `display()` or rich DataFrame rendering instead of plain `print()` for tabular data.
+- Add `# Section Title` comments at the top of code cells for scanability.
diff --git a/docs/README.agents.md b/docs/README.agents.md
index 8d9213b0..9d58c1fa 100644
--- a/docs/README.agents.md
+++ b/docs/README.agents.md
@@ -80,11 +80,11 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-agents) for guidelines on how to
| [Expert React Frontend Engineer](../agents/expert-react-frontend-engineer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-react-frontend-engineer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-react-frontend-engineer.agent.md) | Expert React 19.2 frontend engineer specializing in modern hooks, Server Components, Actions, TypeScript, and performance optimization | |
| [Expert Vue.js Frontend Engineer](../agents/vuejs-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fvuejs-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fvuejs-expert.agent.md) | Expert Vue.js frontend engineer specializing in Vue 3 Composition API, reactivity, state management, testing, and performance with TypeScript | |
| [Fedora Linux Expert](../agents/fedora-linux-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffedora-linux-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Ffedora-linux-expert.agent.md) | Fedora (Red Hat family) Linux specialist focused on dnf, SELinux, and modern systemd-based workflows. | |
-| [Gem Browser Tester](../agents/gem-browser-tester.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md) | Automates browser testing, UI/UX validation using browser automation tools and visual verification techniques | |
+| [Gem Browser Tester](../agents/gem-browser-tester.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-browser-tester.agent.md) | Automates E2E scenarios with Chrome DevTools MCP, Playwright, Agent Browser. UI/UX validation using browser automation tools and visual verification techniques | |
| [Gem Devops](../agents/gem-devops.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-devops.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-devops.agent.md) | Manages containers, CI/CD pipelines, and infrastructure deployment | |
| [Gem Documentation Writer](../agents/gem-documentation-writer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-documentation-writer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-documentation-writer.agent.md) | Generates technical docs, diagrams, maintains code-documentation parity | |
| [Gem Implementer](../agents/gem-implementer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-implementer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-implementer.agent.md) | Executes TDD code changes, ensures verification, maintains quality | |
-| [Gem Orchestrator](../agents/gem-orchestrator.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-orchestrator.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-orchestrator.agent.md) | Coordinates multi-agent workflows, delegates tasks, synthesizes results via runSubagent | |
+| [Gem Orchestrator](../agents/gem-orchestrator.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-orchestrator.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-orchestrator.agent.md) | Team Lead - Coordinates multi-agent workflows with energetic announcements, delegates tasks, synthesizes results via runSubagent | |
| [Gem Planner](../agents/gem-planner.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-planner.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-planner.agent.md) | Creates DAG-based plans with pre-mortem analysis and task decomposition from research findings | |
| [Gem Researcher](../agents/gem-researcher.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-researcher.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-researcher.agent.md) | Research specialist: gathers codebase context, identifies relevant files/patterns, returns structured findings | |
| [Gem Reviewer](../agents/gem-reviewer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-reviewer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fgem-reviewer.agent.md) | Security gatekeeper for critical tasks—OWASP, secrets, compliance | |
@@ -121,6 +121,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-agents) for guidelines on how to
| [Next.js Expert](../agents/expert-nextjs-developer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-nextjs-developer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fexpert-nextjs-developer.agent.md) | Expert Next.js 16 developer specializing in App Router, Server Components, Cache Components, Turbopack, and modern React patterns with TypeScript | |
| [Octopus Release Notes With Mcp](../agents/octopus-deploy-release-notes-mcp.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Foctopus-deploy-release-notes-mcp.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Foctopus-deploy-release-notes-mcp.agent.md) | Generate release notes for a release in Octopus Deploy. The tools for this MCP server provide access to the Octopus Deploy APIs. | octopus [](https://aka.ms/awesome-copilot/install/mcp-vscode?name=octopus&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%2540octopusdeploy%252Fmcp-server%22%5D%2C%22env%22%3A%7B%7D%7D) [](https://aka.ms/awesome-copilot/install/mcp-vscodeinsiders?name=octopus&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%2540octopusdeploy%252Fmcp-server%22%5D%2C%22env%22%3A%7B%7D%7D) [](https://aka.ms/awesome-copilot/install/mcp-visualstudio/mcp-install?%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%2540octopusdeploy%252Fmcp-server%22%5D%2C%22env%22%3A%7B%7D%7D) |
| [OpenAPI to Application Generator](../agents/openapi-to-application.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fopenapi-to-application.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fopenapi-to-application.agent.md) | Expert assistant for generating working applications from OpenAPI specifications | |
+| [Oracle To PostgreSQL Migration Expert](../agents/oracle-to-postgres-migration-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Foracle-to-postgres-migration-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Foracle-to-postgres-migration-expert.agent.md) | Agent for Oracle-to-PostgreSQL application migrations. Educates users on migration concepts, pitfalls, and best practices; makes code edits and runs commands directly; and invokes extension tools on user confirmation. | |
| [PagerDuty Incident Responder](../agents/pagerduty-incident-responder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpagerduty-incident-responder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpagerduty-incident-responder.agent.md) | Responds to PagerDuty incidents by analyzing incident context, identifying recent code changes, and suggesting fixes via GitHub PRs. | [pagerduty](https://github.com/mcp/io.github.PagerDuty/pagerduty-mcp) [](https://aka.ms/awesome-copilot/install/mcp-vscode?name=pagerduty&config=%7B%22url%22%3A%22https%3A%2F%2Fmcp.pagerduty.com%2Fmcp%22%2C%22headers%22%3A%7B%7D%7D) [](https://aka.ms/awesome-copilot/install/mcp-vscodeinsiders?name=pagerduty&config=%7B%22url%22%3A%22https%3A%2F%2Fmcp.pagerduty.com%2Fmcp%22%2C%22headers%22%3A%7B%7D%7D) [](https://aka.ms/awesome-copilot/install/mcp-visualstudio/mcp-install?%7B%22url%22%3A%22https%3A%2F%2Fmcp.pagerduty.com%2Fmcp%22%2C%22headers%22%3A%7B%7D%7D) |
| [PHP MCP Expert](../agents/php-mcp-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fphp-mcp-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fphp-mcp-expert.agent.md) | Expert assistant for PHP MCP server development using the official PHP SDK with attribute-based discovery | |
| [Pimcore Expert](../agents/pimcore-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpimcore-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpimcore-expert.agent.md) | Expert Pimcore development assistant specializing in CMS, DAM, PIM, and E-Commerce solutions with Symfony integration | |
@@ -147,6 +148,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-agents) for guidelines on how to
| [Prompt Builder](../agents/prompt-builder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fprompt-builder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fprompt-builder.agent.md) | Expert prompt engineering and validation system for creating high-quality prompts - Brought to you by microsoft/edge-ai | |
| [Prompt Engineer](../agents/prompt-engineer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fprompt-engineer.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fprompt-engineer.agent.md) | A specialized chat mode for analyzing and improving prompts. Every user input is treated as a prompt to be improved. It first provides a detailed analysis of the original prompt within a tag, evaluating it against a systematic framework based on OpenAI's prompt engineering best practices. Following the analysis, it generates a new, improved prompt. | |
| [Python MCP Server Expert](../agents/python-mcp-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpython-mcp-expert.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpython-mcp-expert.agent.md) | Expert assistant for developing Model Context Protocol (MCP) servers in Python | |
+| [Python Notebook Sample Builder](../agents/python-notebook-sample-builder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpython-notebook-sample-builder.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fpython-notebook-sample-builder.agent.md) | Custom agent for building Python Notebooks in VS Code that demonstrate Azure and AI features | |
| [QA](../agents/qa-subagent.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fqa-subagent.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Fqa-subagent.agent.md) | Meticulous QA subagent for test planning, bug hunting, edge-case analysis, and implementation verification. | |
| [Reepl Linkedin](../agents/reepl-linkedin.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Freepl-linkedin.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Freepl-linkedin.agent.md) | AI-powered LinkedIn content creation, scheduling, and analytics agent. Create posts, carousels, and manage your LinkedIn presence with GitHub Copilot. | |
| [Refine Requirement or Issue](../agents/refine-issue.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Frefine-issue.agent.md) [](https://aka.ms/awesome-copilot/install/agent?url=vscode-insiders%3Achat-agent%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fagents%2Frefine-issue.agent.md) | Refine the requirement or issue with Acceptance Criteria, Technical Considerations, Edge Cases, and NFRs | |
diff --git a/docs/README.plugins.md b/docs/README.plugins.md
index 095228f8..ec527ba7 100644
--- a/docs/README.plugins.md
+++ b/docs/README.plugins.md
@@ -1,6 +1,8 @@
# 🔌 Plugins
-Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.
+Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI or VS Code.
+
+> **Awesome Copilot is a default plugin marketplace** — no setup required in either Copilot CLI or VS Code.
### How to Contribute
See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how to contribute new plugins, improve existing ones, and share your use cases.
@@ -13,14 +15,18 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how t
- Each plugin includes agents and skills for specific workflows
- Plugins make it easy to adopt comprehensive toolkits for particular scenarios
-**Install Plugins:**
-- Use \`copilot plugin install @awesome-copilot\` to install a plugin
-- Or browse to the individual files to copy content manually
-- Plugins help you discover related customizations you might have missed
+**Find & Install in Copilot CLI:**
+- Browse the marketplace from within an interactive Copilot session: \`/plugin marketplace browse awesome-copilot\`
+- Install a plugin: \`copilot plugin install @awesome-copilot\`
+
+**Find & Install in VS Code:**
+- Open the Extensions search view and type \`@agentPlugins\` to browse available plugins
+- Or open the Command Palette and run \`Chat: Plugins\`
| Name | Description | Items | Tags |
| ---- | ----------- | ----- | ---- |
-| [awesome-copilot](../plugins/awesome-copilot/README.md) | Meta prompts that help you discover and generate curated GitHub Copilot agents, instructions, prompts, and skills. | 5 items | github-copilot, discovery, meta, prompt-engineering, agents |
+| [automate-this](../plugins/automate-this/README.md) | Record your screen doing a manual process, drop the video on your Desktop, and let Copilot CLI analyze it frame-by-frame to build working automation scripts. Supports narrated recordings with audio transcription. | 1 items | automation, screen-recording, workflow, video-analysis, process-automation, scripting, productivity, copilot-cli |
+| [awesome-copilot](../plugins/awesome-copilot/README.md) | Meta prompts that help you discover and generate curated GitHub Copilot agents, instructions, prompts, and skills. | 4 items | github-copilot, discovery, meta, prompt-engineering, agents |
| [azure-cloud-development](../plugins/azure-cloud-development/README.md) | Comprehensive Azure cloud development tools including Infrastructure as Code, serverless functions, architecture patterns, and cost optimization for building scalable cloud applications. | 11 items | azure, cloud, infrastructure, bicep, terraform, serverless, architecture, devops |
| [cast-imaging](../plugins/cast-imaging/README.md) | A comprehensive collection of specialized agents for software analysis, impact assessment, structural quality advisories, and architectural review using CAST Imaging. | 3 items | cast-imaging, software-analysis, architecture, quality, impact-analysis, devops |
| [clojure-interactive-programming](../plugins/clojure-interactive-programming/README.md) | Tools for REPL-first Clojure workflows featuring Clojure instructions, the interactive programming chat mode and supporting guidance. | 2 items | clojure, repl, interactive-programming |
@@ -33,19 +39,22 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how t
| [dataverse-sdk-for-python](../plugins/dataverse-sdk-for-python/README.md) | Comprehensive collection for building production-ready Python integrations with Microsoft Dataverse. Includes official documentation, best practices, advanced features, file operations, and code generation prompts. | 4 items | dataverse, python, integration, sdk |
| [devops-oncall](../plugins/devops-oncall/README.md) | A focused set of prompts, instructions, and a chat mode to help triage incidents and respond quickly with DevOps tools and Azure resources. | 3 items | devops, incident-response, oncall, azure |
| [edge-ai-tasks](../plugins/edge-ai-tasks/README.md) | Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai | 2 items | architecture, planning, research, tasks, implementation |
+| [flowstudio-power-automate](../plugins/flowstudio-power-automate/README.md) | Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language. | 3 items | power-automate, power-platform, flowstudio, mcp, model-context-protocol, cloud-flows, workflow-automation |
| [frontend-web-dev](../plugins/frontend-web-dev/README.md) | Essential prompts, instructions, and chat modes for modern frontend web development including React, Angular, Vue, TypeScript, and CSS frameworks. | 4 items | frontend, web, react, typescript, javascript, css, html, angular, vue |
-| [gem-team](../plugins/gem-team/README.md) | A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing. | 8 items | multi-agent, orchestration, dag-planning, parallel-execution, tdd, verification, automation, security, prd |
+| [gem-team](../plugins/gem-team/README.md) | A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing with energetic team lead. | 8 items | multi-agent, orchestration, dag-planning, parallel-execution, tdd, verification, automation, security, prd |
| [go-mcp-development](../plugins/go-mcp-development/README.md) | Complete toolkit for building Model Context Protocol (MCP) servers in Go using the official github.com/modelcontextprotocol/go-sdk. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance. | 2 items | go, golang, mcp, model-context-protocol, server-development, sdk |
| [java-development](../plugins/java-development/README.md) | Comprehensive collection of prompts and instructions for Java development including Spring Boot, Quarkus, testing, documentation, and best practices. | 4 items | java, springboot, quarkus, jpa, junit, javadoc |
| [java-mcp-development](../plugins/java-mcp-development/README.md) | Complete toolkit for building Model Context Protocol servers in Java using the official MCP Java SDK with reactive streams and Spring Boot integration. | 2 items | java, mcp, model-context-protocol, server-development, sdk, reactive-streams, spring-boot, reactor |
| [kotlin-mcp-development](../plugins/kotlin-mcp-development/README.md) | Complete toolkit for building Model Context Protocol (MCP) servers in Kotlin using the official io.modelcontextprotocol:kotlin-sdk library. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance. | 2 items | kotlin, mcp, model-context-protocol, kotlin-multiplatform, server-development, ktor |
| [mcp-m365-copilot](../plugins/mcp-m365-copilot/README.md) | Comprehensive collection for building declarative agents with Model Context Protocol integration for Microsoft 365 Copilot | 4 items | mcp, m365-copilot, declarative-agents, api-plugins, model-context-protocol, adaptive-cards |
+| [napkin](../plugins/napkin/README.md) | Visual whiteboard collaboration for Copilot CLI. Opens an interactive whiteboard in your browser where you can draw, sketch, and add sticky notes — then share everything back with Copilot. Copilot sees your drawings and responds with analysis, suggestions, and ideas. | 1 items | whiteboard, visual, collaboration, brainstorming, non-technical, drawing, sticky-notes, accessibility, copilot-cli, ux |
| [noob-mode](../plugins/noob-mode/README.md) | Plain-English translation layer for non-technical Copilot CLI users. Translates every approval prompt, error message, and technical output into clear, jargon-free English with color-coded risk indicators. | 1 items | accessibility, plain-english, non-technical, beginner, translation, copilot-cli, ux |
| [openapi-to-application-csharp-dotnet](../plugins/openapi-to-application-csharp-dotnet/README.md) | Generate production-ready .NET applications from OpenAPI specifications. Includes ASP.NET Core project scaffolding, controller generation, entity framework integration, and C# best practices. | 2 items | openapi, code-generation, api, csharp, dotnet, aspnet |
| [openapi-to-application-go](../plugins/openapi-to-application-go/README.md) | Generate production-ready Go applications from OpenAPI specifications. Includes project scaffolding, handler generation, middleware setup, and Go best practices for REST APIs. | 2 items | openapi, code-generation, api, go, golang |
| [openapi-to-application-java-spring-boot](../plugins/openapi-to-application-java-spring-boot/README.md) | Generate production-ready Spring Boot applications from OpenAPI specifications. Includes project scaffolding, REST controller generation, service layer organization, and Spring Boot best practices. | 2 items | openapi, code-generation, api, java, spring-boot |
| [openapi-to-application-nodejs-nestjs](../plugins/openapi-to-application-nodejs-nestjs/README.md) | Generate production-ready NestJS applications from OpenAPI specifications. Includes project scaffolding, controller and service generation, TypeScript best practices, and enterprise patterns. | 2 items | openapi, code-generation, api, nodejs, typescript, nestjs |
| [openapi-to-application-python-fastapi](../plugins/openapi-to-application-python-fastapi/README.md) | Generate production-ready FastAPI applications from OpenAPI specifications. Includes project scaffolding, route generation, dependency injection, and Python best practices for async APIs. | 2 items | openapi, code-generation, api, python, fastapi |
+| [oracle-to-postgres-migration-expert](../plugins/oracle-to-postgres-migration-expert/README.md) | Expert agent for Oracle-to-PostgreSQL application migrations in .NET solutions. Performs code edits, runs commands, and invokes extension tools to migrate .NET/Oracle data access patterns to PostgreSQL. | 8 items | oracle, postgresql, database-migration, dotnet, sql, migration, integration-testing, stored-procedures |
| [ospo-sponsorship](../plugins/ospo-sponsorship/README.md) | Tools and resources for Open Source Program Offices (OSPOs) to identify, evaluate, and manage sponsorship of open source dependencies through GitHub Sponsors, Open Collective, and other funding platforms. | 1 items | |
| [partners](../plugins/partners/README.md) | Custom agents that have been created by GitHub partners | 20 items | devops, security, database, cloud, infrastructure, observability, feature-flags, cicd, migration, performance |
| [pcf-development](../plugins/pcf-development/README.md) | Complete toolkit for developing custom code components using Power Apps Component Framework for model-driven and canvas apps | 0 items | power-apps, pcf, component-framework, typescript, power-platform |
diff --git a/docs/README.skills.md b/docs/README.skills.md
index e90a64b7..677ea5c4 100644
--- a/docs/README.skills.md
+++ b/docs/README.skills.md
@@ -30,12 +30,13 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [agent-governance](../skills/agent-governance/SKILL.md) | Patterns and techniques for adding governance, safety, and trust controls to AI agent systems. Use this skill when: - Building AI agents that call external tools (APIs, databases, file systems) - Implementing policy-based access controls for agent tool usage - Adding semantic intent classification to detect dangerous prompts - Creating trust scoring systems for multi-agent workflows - Building audit trails for agent actions and decisions - Enforcing rate limits, content filters, or tool restrictions on agents - Working with any agent framework (PydanticAI, CrewAI, OpenAI Agents, LangChain, AutoGen) | None |
| [agentic-eval](../skills/agentic-eval/SKILL.md) | Patterns and techniques for evaluating and improving AI agent outputs. Use this skill when: - Implementing self-critique and reflection loops - Building evaluator-optimizer pipelines for quality-critical generation - Creating test-driven code refinement workflows - Designing rubric-based or LLM-as-judge evaluation systems - Adding iterative improvement to agent outputs (code, reports, analysis) - Measuring and improving agent response quality | None |
| [ai-prompt-engineering-safety-review](../skills/ai-prompt-engineering-safety-review/SKILL.md) | Comprehensive AI prompt engineering safety review and improvement prompt. Analyzes prompts for safety, bias, security vulnerabilities, and effectiveness while providing detailed improvement recommendations with extensive frameworks, testing methodologies, and educational content. | None |
-| [appinsights-instrumentation](../skills/appinsights-instrumentation/SKILL.md) | Instrument a webapp to send useful telemetry data to Azure App Insights | `LICENSE.txt` `examples/appinsights.bicep` `references/ASPNETCORE.md` `references/AUTO.md` `references/NODEJS.md` `references/PYTHON.md` `scripts/appinsights.ps1` |
+| [appinsights-instrumentation](../skills/appinsights-instrumentation/SKILL.md) | Instrument a webapp to send useful telemetry data to Azure App Insights | `LICENSE.txt` `examples` `references/ASPNETCORE.md` `references/AUTO.md` `references/NODEJS.md` `references/PYTHON.md` `scripts/appinsights.ps1` |
| [apple-appstore-reviewer](../skills/apple-appstore-reviewer/SKILL.md) | Serves as a reviewer of the codebase with instructions on looking for Apple App Store optimizations or rejection reasons. | None |
| [arch-linux-triage](../skills/arch-linux-triage/SKILL.md) | Triage and resolve Arch Linux issues with pacman, systemd, and rolling-release best practices. | None |
| [architecture-blueprint-generator](../skills/architecture-blueprint-generator/SKILL.md) | Comprehensive project architecture blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks and architectural patterns, generates visual diagrams, documents implementation patterns, and provides extensible blueprints for maintaining architectural consistency and guiding new development. | None |
| [aspire](../skills/aspire/SKILL.md) | Aspire skill covering the Aspire CLI, AppHost orchestration, service discovery, integrations, MCP server, VS Code extension, Dev Containers, GitHub Codespaces, templates, dashboard, and deployment. Use when the user asks to create, run, debug, configure, deploy, or troubleshoot an Aspire distributed application. | `references/architecture.md` `references/cli-reference.md` `references/dashboard.md` `references/deployment.md` `references/integrations-catalog.md` `references/mcp-server.md` `references/polyglot-apis.md` `references/testing.md` `references/troubleshooting.md` |
| [aspnet-minimal-api-openapi](../skills/aspnet-minimal-api-openapi/SKILL.md) | Create ASP.NET Minimal API endpoints with proper OpenAPI documentation | None |
+| [automate-this](../skills/automate-this/SKILL.md) | Analyze a screen recording of a manual process and produce targeted, working automation scripts. Extracts frames and audio narration from video files, reconstructs the step-by-step workflow, and proposes automation at multiple complexity levels using tools already installed on the user machine. | None |
| [az-cost-optimize](../skills/az-cost-optimize/SKILL.md) | Analyze Azure resources used in the app (IaC files and/or resources in a target rg) and optimize costs - creating GitHub issues for identified optimizations. | None |
| [azure-deployment-preflight](../skills/azure-deployment-preflight/SKILL.md) | Performs comprehensive preflight validation of Bicep deployments to Azure, including template syntax validation, what-if analysis, and permission checks. Use this skill before any deployment to Azure to preview changes, identify potential issues, and ensure the deployment will succeed. Activate when users mention deploying to Azure, validating Bicep files, checking deployment permissions, previewing infrastructure changes, running what-if, or preparing for azd provision. | `references/ERROR-HANDLING.md` `references/REPORT-TEMPLATE.md` `references/VALIDATION-COMMANDS.md` |
| [azure-devops-cli](../skills/azure-devops-cli/SKILL.md) | Manage Azure DevOps resources via CLI including projects, repos, pipelines, builds, pull requests, work items, artifacts, and service endpoints. Use when working with Azure DevOps, az commands, devops automation, CI/CD, or when user mentions Azure DevOps CLI. | `references/advanced-usage.md` `references/boards-and-iterations.md` `references/org-and-security.md` `references/pipelines-and-builds.md` `references/repos-and-prs.md` `references/variables-and-agents.md` `references/workflows-and-patterns.md` |
@@ -54,6 +55,8 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [breakdown-test](../skills/breakdown-test/SKILL.md) | Test Planning and Quality Assurance prompt that generates comprehensive test strategies, task breakdowns, and quality validation plans for GitHub projects. | None |
| [centos-linux-triage](../skills/centos-linux-triage/SKILL.md) | Triage and resolve CentOS issues using RHEL-compatible tooling, SELinux-aware practices, and firewalld. | None |
| [chrome-devtools](../skills/chrome-devtools/SKILL.md) | Expert-level browser automation, debugging, and performance analysis using Chrome DevTools MCP. Use for interacting with web pages, capturing screenshots, analyzing network traffic, and profiling performance. | None |
+| [cli-mastery](../skills/cli-mastery/SKILL.md) | Interactive training for the GitHub Copilot CLI. Guided lessons, quizzes, scenario challenges, and a full reference covering slash commands, shortcuts, modes, agents, skills, MCP, and configuration. Say "cliexpert" to start. | `references/final-exam.md` `references/module-1-slash-commands.md` `references/module-2-keyboard-shortcuts.md` `references/module-3-modes.md` `references/module-4-agents.md` `references/module-5-skills.md` `references/module-6-mcp.md` `references/module-7-advanced.md` `references/module-8-configuration.md` `references/scenarios.md` |
+| [cloud-design-patterns](../skills/cloud-design-patterns/SKILL.md) | Cloud design patterns for distributed systems architecture covering 42 industry-standard patterns across reliability, performance, messaging, security, and deployment categories. Use when designing, reviewing, or implementing distributed system architectures. | `references/architecture-design.md` `references/azure-service-mappings.md` `references/best-practices.md` `references/deployment-operational.md` `references/event-driven.md` `references/messaging-integration.md` `references/performance.md` `references/reliability-resilience.md` `references/security.md` |
| [code-exemplars-blueprint-generator](../skills/code-exemplars-blueprint-generator/SKILL.md) | Technology-agnostic prompt generator that creates customizable AI prompts for scanning codebases and identifying high-quality code exemplars. Supports multiple programming languages (.NET, Java, JavaScript, TypeScript, React, Angular, Python) with configurable analysis depth, categorization methods, and documentation formats to establish coding standards and maintain consistency across development teams. | None |
| [comment-code-generate-a-tutorial](../skills/comment-code-generate-a-tutorial/SKILL.md) | Transform this Python script into a polished, beginner-friendly project by refactoring the code, adding clear instructional comments, and generating a complete markdown tutorial. | None |
| [containerize-aspnet-framework](../skills/containerize-aspnet-framework/SKILL.md) | Containerize an ASP.NET .NET Framework project by creating Dockerfile and .dockerfile files customized for the project. | None |
@@ -64,6 +67,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [copilot-cli-quickstart](../skills/copilot-cli-quickstart/SKILL.md) | Use this skill when someone wants to learn GitHub Copilot CLI from scratch. Offers interactive step-by-step tutorials with separate Developer and Non-Developer tracks, plus on-demand Q&A. Just say "start tutorial" or ask a question! Note: This skill targets GitHub Copilot CLI specifically and uses CLI-specific tools (ask_user, sql, fetch_copilot_cli_documentation). | None |
| [copilot-instructions-blueprint-generator](../skills/copilot-instructions-blueprint-generator/SKILL.md) | Technology-agnostic blueprint generator for creating comprehensive copilot-instructions.md files that guide GitHub Copilot to produce code consistent with project standards, architecture patterns, and exact technology versions by analyzing existing codebase patterns and avoiding assumptions. | None |
| [copilot-sdk](../skills/copilot-sdk/SKILL.md) | Build agentic applications with GitHub Copilot SDK. Use when embedding AI agents in apps, creating custom tools, implementing streaming responses, managing sessions, connecting to MCP servers, or creating custom agents. Triggers on Copilot SDK, GitHub SDK, agentic app, embed Copilot, programmable agent, MCP server, custom agent. | None |
+| [copilot-spaces](../skills/copilot-spaces/SKILL.md) | Use Copilot Spaces to provide project-specific context to conversations. Use this skill when users mention a "Copilot space", want to load context from a shared knowledge base, discover available spaces, or ask questions grounded in curated project documentation, code, and instructions. | None |
| [copilot-usage-metrics](../skills/copilot-usage-metrics/SKILL.md) | Retrieve and display GitHub Copilot usage metrics for organizations and enterprises using the GitHub CLI and REST API. | `get-enterprise-metrics.sh` `get-enterprise-user-metrics.sh` `get-org-metrics.sh` `get-org-user-metrics.sh` |
| [cosmosdb-datamodeling](../skills/cosmosdb-datamodeling/SKILL.md) | Step-by-step guide for capturing key application requirements for a NoSQL use case and producing an Azure Cosmos DB NoSQL data model design using best practices and common patterns. Artifacts produced: "cosmosdb_requirements.md" file and "cosmosdb_data_model.md" file | None |
| [create-agentsmd](../skills/create-agentsmd/SKILL.md) | Prompt for generating an AGENTS.md file for a repository | None |
@@ -83,6 +87,9 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [create-technical-spike](../skills/create-technical-spike/SKILL.md) | Create time-boxed technical spike documents for researching and resolving critical development decisions before implementation. | None |
| [create-tldr-page](../skills/create-tldr-page/SKILL.md) | Create a tldr page from documentation URLs and command examples, requiring both URL and command name. | None |
| [create-web-form](../skills/create-web-form/SKILL.md) | Create robust, accessible web forms with best practices for HTML structure, CSS styling, JavaScript interactivity, form validation, and server-side processing. Use when asked to "create a form", "build a web form", "add a contact form", "make a signup form", or when building any HTML form with data handling. Covers PHP and Python backends, MySQL database integration, REST APIs, XML data exchange, accessibility (ARIA), and progressive web apps. | `references/accessibility.md` `references/aria-form-role.md` `references/css-styling.md` `references/form-basics.md` `references/form-controls.md` `references/form-data-handling.md` `references/html-form-elements.md` `references/html-form-example.md` `references/hypertext-transfer-protocol.md` `references/javascript.md` `references/php-cookies.md` `references/php-forms.md` `references/php-json.md` `references/php-mysql-database.md` `references/progressive-web-app.md` `references/python-as-web-framework.md` `references/python-contact-form.md` `references/python-flask-app.md` `references/python-flask.md` `references/security.md` `references/styling-web-forms.md` `references/web-api.md` `references/web-performance.md` `references/xml.md` |
+| [creating-oracle-to-postgres-master-migration-plan](../skills/creating-oracle-to-postgres-master-migration-plan/SKILL.md) | Discovers all projects in a .NET solution, classifies each for Oracle-to-PostgreSQL migration eligibility, and produces a persistent master migration plan. Use when starting a multi-project Oracle-to-PostgreSQL migration, creating a migration inventory, or assessing which .NET projects contain Oracle dependencies. | None |
+| [creating-oracle-to-postgres-migration-bug-report](../skills/creating-oracle-to-postgres-migration-bug-report/SKILL.md) | Creates structured bug reports for defects found during Oracle-to-PostgreSQL migration. Use when documenting behavioral differences between Oracle and PostgreSQL as actionable bug reports with severity, root cause, and remediation steps. | `references/BUG-REPORT-TEMPLATE.md` |
+| [creating-oracle-to-postgres-migration-integration-tests](../skills/creating-oracle-to-postgres-migration-integration-tests/SKILL.md) | Creates integration test cases for .NET data access artifacts during Oracle-to-PostgreSQL database migrations. Generates DB-agnostic xUnit tests with deterministic seed data that validate behavior consistency across both database systems. Use when creating integration tests for a migrated project, generating test coverage for data access layers, or writing Oracle-to-PostgreSQL migration validation tests. | None |
| [csharp-async](../skills/csharp-async/SKILL.md) | Get best practices for C# async programming | None |
| [csharp-docs](../skills/csharp-docs/SKILL.md) | Ensure that C# types are documented with XML comments and follow best practices for documentation. | None |
| [csharp-mcp-server-generator](../skills/csharp-mcp-server-generator/SKILL.md) | Generate a complete MCP server project in C# with tools, prompts, and proper configuration | None |
@@ -105,12 +112,15 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [editorconfig](../skills/editorconfig/SKILL.md) | Generates a comprehensive and best-practice-oriented .editorconfig file based on project analysis and user preferences. | None |
| [ef-core](../skills/ef-core/SKILL.md) | Get best practices for Entity Framework Core | None |
| [entra-agent-user](../skills/entra-agent-user/SKILL.md) | Create Agent Users in Microsoft Entra ID from Agent Identities, enabling AI agents to act as digital workers with user identity capabilities in Microsoft 365 and Azure environments. | None |
-| [excalidraw-diagram-generator](../skills/excalidraw-diagram-generator/SKILL.md) | Generate Excalidraw diagrams from natural language descriptions. Use when asked to "create a diagram", "make a flowchart", "visualize a process", "draw a system architecture", "create a mind map", or "generate an Excalidraw file". Supports flowcharts, relationship diagrams, mind maps, and system architecture diagrams. Outputs .excalidraw JSON files that can be opened directly in Excalidraw. | `references/element-types.md` `references/excalidraw-schema.md` `scripts/.gitignore` `scripts/README.md` `scripts/add-arrow.py` `scripts/add-icon-to-diagram.py` `scripts/split-excalidraw-library.py` `templates/business-flow-swimlane-template.excalidraw` `templates/class-diagram-template.excalidraw` `templates/data-flow-diagram-template.excalidraw` `templates/er-diagram-template.excalidraw` `templates/flowchart-template.excalidraw` `templates/mindmap-template.excalidraw` `templates/relationship-template.excalidraw` `templates/sequence-diagram-template.excalidraw` |
+| [excalidraw-diagram-generator](../skills/excalidraw-diagram-generator/SKILL.md) | Generate Excalidraw diagrams from natural language descriptions. Use when asked to "create a diagram", "make a flowchart", "visualize a process", "draw a system architecture", "create a mind map", or "generate an Excalidraw file". Supports flowcharts, relationship diagrams, mind maps, and system architecture diagrams. Outputs .excalidraw JSON files that can be opened directly in Excalidraw. | `references/element-types.md` `references/excalidraw-schema.md` `scripts/.gitignore` `scripts/README.md` `scripts/add-arrow.py` `scripts/add-icon-to-diagram.py` `scripts/split-excalidraw-library.py` `templates` |
| [fabric-lakehouse](../skills/fabric-lakehouse/SKILL.md) | Use this skill to get context about Fabric Lakehouse and its features for software systems and AI-powered functions. It offers descriptions of Lakehouse data components, organization with schemas and shortcuts, access control, and code examples. This skill supports users in designing, building, and optimizing Lakehouse solutions using best practices. | `references/getdata.md` `references/pyspark.md` |
| [fedora-linux-triage](../skills/fedora-linux-triage/SKILL.md) | Triage and resolve Fedora issues with dnf, systemd, and SELinux-aware guidance. | None |
| [finalize-agent-prompt](../skills/finalize-agent-prompt/SKILL.md) | Finalize prompt file using the role of an AI agent to polish the prompt for the end user. | None |
| [finnish-humanizer](../skills/finnish-humanizer/SKILL.md) | Detect and remove AI-generated markers from Finnish text, making it sound like a native Finnish speaker wrote it. Use when asked to "humanize", "naturalize", or "remove AI feel" from Finnish text, or when editing .md/.txt files containing Finnish content. Identifies 26 patterns (12 Finnish-specific + 14 universal) and 4 style markers. | `references/patterns.md` |
| [first-ask](../skills/first-ask/SKILL.md) | Interactive, input-tool powered, task refinement workflow: interrogates scope, deliverables, constraints before carrying out the task; Requires the Joyride extension. | None |
+| [flowstudio-power-automate-build](../skills/flowstudio-power-automate-build/SKILL.md) | Build, scaffold, and deploy Power Automate cloud flows using the FlowStudio MCP server. Load this skill when asked to: create a flow, build a new flow, deploy a flow definition, scaffold a Power Automate workflow, construct a flow JSON, update an existing flow's actions, patch a flow definition, add actions to a flow, wire up connections, or generate a workflow definition from scratch. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app | `references/action-patterns-connectors.md` `references/action-patterns-core.md` `references/action-patterns-data.md` `references/build-patterns.md` `references/flow-schema.md` `references/trigger-types.md` |
+| [flowstudio-power-automate-debug](../skills/flowstudio-power-automate-debug/SKILL.md) | Debug failing Power Automate cloud flows using the FlowStudio MCP server. Load this skill when asked to: debug a flow, investigate a failed run, why is this flow failing, inspect action outputs, find the root cause of a flow error, fix a broken Power Automate flow, diagnose a timeout, trace a DynamicOperationRequestFailure, check connector auth errors, read error details from a run, or troubleshoot expression failures. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app | `references/common-errors.md` `references/debug-workflow.md` |
+| [flowstudio-power-automate-mcp](../skills/flowstudio-power-automate-mcp/SKILL.md) | Connect to and operate Power Automate cloud flows via a FlowStudio MCP server. Use when asked to: list flows, read a flow definition, check run history, inspect action outputs, resubmit a run, cancel a running flow, view connections, get a trigger URL, validate a definition, monitor flow health, or any task that requires talking to the Power Automate API through an MCP tool. Also use for Power Platform environment discovery and connection management. Requires a FlowStudio MCP subscription or compatible server — see https://mcp.flowstudio.app | `references/MCP-BOOTSTRAP.md` `references/action-types.md` `references/connection-references.md` `references/tool-reference.md` |
| [fluentui-blazor](../skills/fluentui-blazor/SKILL.md) | Guide for using the Microsoft Fluent UI Blazor component library (Microsoft.FluentUI.AspNetCore.Components NuGet package) in Blazor applications. Use this when the user is building a Blazor app with Fluent UI components, setting up the library, using FluentUI components like FluentButton, FluentDataGrid, FluentDialog, FluentToast, FluentNavMenu, FluentTextField, FluentSelect, FluentAutocomplete, FluentDesignTheme, or any component prefixed with "Fluent". Also use when troubleshooting missing providers, JS interop issues, or theming. | `references/DATAGRID.md` `references/LAYOUT-AND-NAVIGATION.md` `references/SETUP.md` `references/THEMING.md` |
| [folder-structure-blueprint-generator](../skills/folder-structure-blueprint-generator/SKILL.md) | Comprehensive technology-agnostic prompt for analyzing and documenting project folder structures. Auto-detects project types (.NET, Java, React, Angular, Python, Node.js, Flutter), generates detailed blueprints with visualization options, naming conventions, file placement patterns, and extension templates for maintaining consistent code organization across diverse technology stacks. | None |
| [game-engine](../skills/game-engine/SKILL.md) | Expert skill for building web-based game engines and games using HTML5, Canvas, WebGL, and JavaScript. Use when asked to create games, build game engines, implement game physics, handle collision detection, set up game loops, manage sprites, add game controls, or work with 2D/3D rendering. Covers techniques for platformers, breakout-style games, maze games, tilemaps, audio, multiplayer via WebRTC, and publishing games. | `assets/2d-maze-game.md` `assets/2d-platform-game.md` `assets/gameBase-template-repo.md` `assets/paddle-game-template.md` `assets/simple-2d-engine.md` `references/3d-web-games.md` `references/algorithms.md` `references/basics.md` `references/game-control-mechanisms.md` `references/game-engine-core-principles.md` `references/game-publishing.md` `references/techniques.md` `references/terminology.md` `references/web-apis.md` |
@@ -120,7 +130,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [git-commit](../skills/git-commit/SKILL.md) | Execute git commit with conventional commit message analysis, intelligent staging, and message generation. Use when user asks to commit changes, create a git commit, or mentions "/commit". Supports: (1) Auto-detecting type and scope from changes, (2) Generating conventional commit messages from diff, (3) Interactive commit with optional type/scope/description overrides, (4) Intelligent file staging for logical grouping | None |
| [git-flow-branch-creator](../skills/git-flow-branch-creator/SKILL.md) | Intelligent Git Flow branch creator that analyzes git status/diff and creates appropriate branches following the nvie Git Flow branching model. | None |
| [github-copilot-starter](../skills/github-copilot-starter/SKILL.md) | Set up complete GitHub Copilot configuration for a new project based on technology stack | None |
-| [github-issues](../skills/github-issues/SKILL.md) | Create, update, and manage GitHub issues using MCP tools. Use this skill when users want to create bug reports, feature requests, or task issues, update existing issues, add labels/assignees/milestones, set issue fields (dates, priority, custom fields), set issue types, or manage issue workflows. Triggers on requests like "create an issue", "file a bug", "request a feature", "update issue X", "set the priority", "set the start date", or any GitHub issue management task. | `references/dependencies.md` `references/issue-fields.md` `references/issue-types.md` `references/projects.md` `references/sub-issues.md` `references/templates.md` |
+| [github-issues](../skills/github-issues/SKILL.md) | Create, update, and manage GitHub issues using MCP tools. Use this skill when users want to create bug reports, feature requests, or task issues, update existing issues, add labels/assignees/milestones, set issue fields (dates, priority, custom fields), set issue types, manage issue workflows, link issues, add dependencies, or track blocked-by/blocking relationships. Triggers on requests like "create an issue", "file a bug", "request a feature", "update issue X", "set the priority", "set the start date", "link issues", "add dependency", "blocked by", "blocking", or any GitHub issue management task. | `references/dependencies.md` `references/images.md` `references/issue-fields.md` `references/issue-types.md` `references/projects.md` `references/search.md` `references/sub-issues.md` `references/templates.md` |
| [go-mcp-server-generator](../skills/go-mcp-server-generator/SKILL.md) | Generate a complete Go MCP server project with proper structure, dependencies, and implementation using the official github.com/modelcontextprotocol/go-sdk. | None |
| [image-manipulation-image-magick](../skills/image-manipulation-image-magick/SKILL.md) | Process and manipulate images using ImageMagick. Supports resizing, format conversion, batch processing, and retrieving image metadata. Use when working with images, creating thumbnails, resizing wallpapers, or performing batch image operations. | None |
| [import-infrastructure-as-code](../skills/import-infrastructure-as-code/SKILL.md) | Import existing Azure resources into Terraform using Azure CLI discovery and Azure Verified Modules (AVM). Use when asked to reverse-engineer live Azure infrastructure, generate Infrastructure as Code from existing subscriptions/resource groups/resource IDs, map dependencies, derive exact import addresses from downloaded module source, prevent configuration drift, and produce AVM-based Terraform files ready for validation and planning across any Azure resource type. | None |
@@ -150,6 +160,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [microsoft-code-reference](../skills/microsoft-code-reference/SKILL.md) | Look up Microsoft API references, find working code samples, and verify SDK code is correct. Use when working with Azure SDKs, .NET libraries, or Microsoft APIs—to find the right method, check parameters, get working examples, or troubleshoot errors. Catches hallucinated methods, wrong signatures, and deprecated patterns by querying official docs. | None |
| [microsoft-docs](../skills/microsoft-docs/SKILL.md) | Query official Microsoft documentation to find concepts, tutorials, and code examples across Azure, .NET, Agent Framework, Aspire, VS Code, GitHub, and more. Uses Microsoft Learn MCP as the default, with Context7 and Aspire MCP for content that lives outside learn.microsoft.com. | None |
| [microsoft-skill-creator](../skills/microsoft-skill-creator/SKILL.md) | Create agent skills for Microsoft technologies using Learn MCP tools. Use when users want to create a skill that teaches agents about any Microsoft technology, library, framework, or service (Azure, .NET, M365, VS Code, Bicep, etc.). Investigates topics deeply, then generates a hybrid skill storing essential knowledge locally while enabling dynamic deeper investigation. | `references/skill-templates.md` |
+| [migrating-oracle-to-postgres-stored-procedures](../skills/migrating-oracle-to-postgres-stored-procedures/SKILL.md) | Migrates Oracle PL/SQL stored procedures to PostgreSQL PL/pgSQL. Translates Oracle-specific syntax, preserves method signatures and type-anchored parameters, leverages orafce where appropriate, and applies COLLATE "C" for Oracle-compatible text sorting. Use when converting Oracle stored procedures or functions to PostgreSQL equivalents during a database migration. | None |
| [mkdocs-translations](../skills/mkdocs-translations/SKILL.md) | Generate a language translation for a mkdocs documentation stack. | None |
| [model-recommendation](../skills/model-recommendation/SKILL.md) | Analyze chatmode or prompt files and recommend optimal AI models based on task complexity, required capabilities, and cost-efficiency | None |
| [msstore-cli](../skills/msstore-cli/SKILL.md) | Microsoft Store Developer CLI (msstore) for publishing Windows applications to the Microsoft Store. Use when asked to configure Store credentials, list Store apps, check submission status, publish submissions, manage package flights, set up CI/CD for Store publishing, or integrate with Partner Center. Supports Windows App SDK/WinUI, UWP, .NET MAUI, Flutter, Electron, React Native, and PWA applications. | None |
@@ -157,6 +168,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [my-issues](../skills/my-issues/SKILL.md) | List my issues in the current repository | None |
| [my-pull-requests](../skills/my-pull-requests/SKILL.md) | List my pull requests in the current repository | None |
| [nano-banana-pro-openrouter](../skills/nano-banana-pro-openrouter/SKILL.md) | Generate or edit images via OpenRouter with the Gemini 3 Pro Image model. Use for prompt-only image generation, image edits, and multi-image compositing; supports 1K/2K/4K output. | `assets/SYSTEM_TEMPLATE` `scripts/generate_image.py` |
+| [napkin](../skills/napkin/SKILL.md) | Visual whiteboard collaboration for Copilot CLI. Creates an interactive whiteboard that opens in your browser — draw, sketch, add sticky notes, then share everything back with Copilot. Copilot sees your drawings and text, and responds with analysis, suggestions, and ideas. | `assets/napkin.html` `assets/step1-activate.svg` `assets/step2-whiteboard.svg` `assets/step3-draw.svg` `assets/step4-share.svg` `assets/step5-response.svg` |
| [next-intl-add-language](../skills/next-intl-add-language/SKILL.md) | Add new language to a Next.js + next-intl application | None |
| [noob-mode](../skills/noob-mode/SKILL.md) | Plain-English translation layer for non-technical Copilot CLI users. Translates every approval prompt, error message, and technical output into clear, jargon-free English with color-coded risk indicators. | `references/examples.md` `references/glossary.md` |
| [nuget-manager](../skills/nuget-manager/SKILL.md) | Manage NuGet packages in .NET projects/solutions. Use this skill when adding, removing, or updating NuGet package versions. It enforces using `dotnet` CLI for package management and provides strict procedures for direct file edits only when updating versions. | None |
@@ -164,6 +176,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [pdftk-server](../skills/pdftk-server/SKILL.md) | Skill for using the command-line tool pdftk (PDFtk Server) for working with PDF files. Use when asked to merge PDFs, split PDFs, rotate pages, encrypt or decrypt PDFs, fill PDF forms, apply watermarks, stamp overlays, extract metadata, burst documents into pages, repair corrupted PDFs, attach or extract files, or perform any PDF manipulation from the command line. | `references/download.md` `references/pdftk-cli-examples.md` `references/pdftk-man-page.md` `references/pdftk-server-license.md` `references/third-party-materials.md` |
| [penpot-uiux-design](../skills/penpot-uiux-design/SKILL.md) | Comprehensive guide for creating professional UI/UX designs in Penpot using MCP tools. Use this skill when: (1) Creating new UI/UX designs for web, mobile, or desktop applications, (2) Building design systems with components and tokens, (3) Designing dashboards, forms, navigation, or landing pages, (4) Applying accessibility standards and best practices, (5) Following platform guidelines (iOS, Android, Material Design), (6) Reviewing or improving existing Penpot designs for usability. Triggers: "design a UI", "create interface", "build layout", "design dashboard", "create form", "design landing page", "make it accessible", "design system", "component library". | `references/accessibility.md` `references/component-patterns.md` `references/platform-guidelines.md` `references/setup-troubleshooting.md` |
| [php-mcp-server-generator](../skills/php-mcp-server-generator/SKILL.md) | Generate a complete PHP Model Context Protocol server project with tools, resources, prompts, and tests using the official PHP SDK | None |
+| [planning-oracle-to-postgres-migration-integration-testing](../skills/planning-oracle-to-postgres-migration-integration-testing/SKILL.md) | Creates an integration testing plan for .NET data access artifacts during Oracle-to-PostgreSQL database migrations. Analyzes a single project to identify repositories, DAOs, and service layers that interact with the database, then produces a structured testing plan. Use when planning integration test coverage for a migrated project, identifying which data access methods need tests, or preparing for Oracle-to-PostgreSQL migration validation. | None |
| [plantuml-ascii](../skills/plantuml-ascii/SKILL.md) | Generate ASCII art diagrams using PlantUML text mode. Use when user asks to create ASCII diagrams, text-based diagrams, terminal-friendly diagrams, or mentions plantuml ascii, text diagram, ascii art diagram. Supports: Converting PlantUML diagrams to ASCII art, Creating sequence diagrams, class diagrams, flowcharts in ASCII format, Generating Unicode-enhanced ASCII art with -utxt flag | None |
| [playwright-automation-fill-in-form](../skills/playwright-automation-fill-in-form/SKILL.md) | Automate filling in a form using Playwright MCP | None |
| [playwright-explore-website](../skills/playwright-explore-website/SKILL.md) | Website exploration for testing using Playwright MCP | None |
@@ -192,8 +205,10 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [remember-interactive-programming](../skills/remember-interactive-programming/SKILL.md) | A micro-prompt that reminds the agent that it is an interactive programmer. Works great in Clojure when Copilot has access to the REPL (probably via Backseat Driver). Will work with any system that has a live REPL that the agent can use. Adapt the prompt with any specific reminders in your workflow and/or workspace. | None |
| [repo-story-time](../skills/repo-story-time/SKILL.md) | Generate a comprehensive repository summary and narrative story from commit history | None |
| [review-and-refactor](../skills/review-and-refactor/SKILL.md) | Review and refactor code in your project according to defined instructions | None |
+| [reviewing-oracle-to-postgres-migration](../skills/reviewing-oracle-to-postgres-migration/SKILL.md) | Identifies Oracle-to-PostgreSQL migration risks by cross-referencing code against known behavioral differences (empty strings, refcursors, type coercion, sorting, timestamps, concurrent transactions, etc.). Use when planning a database migration, reviewing migration artifacts, or validating that integration tests cover Oracle/PostgreSQL differences. | `references/REFERENCE.md` `references/empty-strings-handling.md` `references/no-data-found-exceptions.md` `references/oracle-parentheses-from-clause.md` `references/oracle-to-postgres-sorting.md` `references/oracle-to-postgres-timestamp-timezone.md` `references/oracle-to-postgres-to-char-numeric.md` `references/oracle-to-postgres-type-coercion.md` `references/postgres-concurrent-transactions.md` `references/postgres-refcursor-handling.md` |
| [ruby-mcp-server-generator](../skills/ruby-mcp-server-generator/SKILL.md) | Generate a complete Model Context Protocol server project in Ruby using the official MCP Ruby SDK gem. | None |
| [rust-mcp-server-generator](../skills/rust-mcp-server-generator/SKILL.md) | Generate a complete Rust Model Context Protocol server project with tools, prompts, resources, and tests using the official rmcp SDK | None |
+| [scaffolding-oracle-to-postgres-migration-test-project](../skills/scaffolding-oracle-to-postgres-migration-test-project/SKILL.md) | Scaffolds an xUnit integration test project for validating Oracle-to-PostgreSQL database migration behavior in .NET solutions. Creates the test project, transaction-rollback base class, and seed data manager. Use when setting up test infrastructure before writing migration integration tests, or when a test project is needed for Oracle-to-PostgreSQL validation. | None |
| [scoutqa-test](../skills/scoutqa-test/SKILL.md) | This skill should be used when the user asks to "test this website", "run exploratory testing", "check for accessibility issues", "verify the login flow works", "find bugs on this page", or requests automated QA testing. Triggers on web application testing scenarios including smoke tests, accessibility audits, e-commerce flows, and user flow validation using ScoutQA CLI. IMPORTANT: Use this skill proactively after implementing web application features to verify they work correctly - don't wait for the user to ask for testing. | None |
| [shuffle-json-data](../skills/shuffle-json-data/SKILL.md) | Shuffle repetitive JSON objects safely by validating schema consistency before randomising entries. | None |
| [snowflake-semanticview](../skills/snowflake-semanticview/SKILL.md) | Create, alter, and validate Snowflake semantic views using Snowflake CLI (snow). Use when asked to build or troubleshoot semantic views/semantic layer definitions with CREATE/ALTER SEMANTIC VIEW, to validate semantic-view DDL against Snowflake via CLI, or to guide Snowflake CLI installation and connection setup. | None |
@@ -205,7 +220,6 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [structured-autonomy-plan](../skills/structured-autonomy-plan/SKILL.md) | Structured Autonomy Planning Prompt | None |
| [suggest-awesome-github-copilot-agents](../skills/suggest-awesome-github-copilot-agents/SKILL.md) | Suggest relevant GitHub Copilot Custom Agents files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing custom agents in this repository, and identifying outdated agents that need updates. | None |
| [suggest-awesome-github-copilot-instructions](../skills/suggest-awesome-github-copilot-instructions/SKILL.md) | Suggest relevant GitHub Copilot instruction files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing instructions in this repository, and identifying outdated instructions that need updates. | None |
-| [suggest-awesome-github-copilot-prompts](../skills/suggest-awesome-github-copilot-prompts/SKILL.md) | Suggest relevant GitHub Copilot prompt files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing prompts in this repository, and identifying outdated prompts that need updates. | None |
| [suggest-awesome-github-copilot-skills](../skills/suggest-awesome-github-copilot-skills/SKILL.md) | Suggest relevant GitHub Copilot skills from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing skills in this repository, and identifying outdated skills that need updates. | None |
| [swift-mcp-server-generator](../skills/swift-mcp-server-generator/SKILL.md) | Generate a complete Model Context Protocol server project in Swift using the official MCP Swift SDK package. | None |
| [technology-stack-blueprint-generator](../skills/technology-stack-blueprint-generator/SKILL.md) | Comprehensive technology stack blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks, programming languages, and implementation patterns across multiple platforms (.NET, Java, JavaScript, React, Python). Generates configurable blueprints with version information, licensing details, usage patterns, coding conventions, and visual diagrams. Provides implementation-ready templates and maintains architectural consistency for guided development. | None |
@@ -229,7 +243,7 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-skills) for guidelines on how to
| [webapp-testing](../skills/webapp-testing/SKILL.md) | Toolkit for interacting with and testing local web applications using Playwright. Supports verifying frontend functionality, debugging UI behavior, capturing browser screenshots, and viewing browser logs. | `test-helper.js` |
| [what-context-needed](../skills/what-context-needed/SKILL.md) | Ask Copilot what files it needs to see before answering a question | None |
| [winapp-cli](../skills/winapp-cli/SKILL.md) | Windows App Development CLI (winapp) for building, packaging, and deploying Windows applications. Use when asked to initialize Windows app projects, create MSIX packages, generate AppxManifest.xml, manage development certificates, add package identity for debugging, sign packages, publish to the Microsoft Store, create external catalogs, or access Windows SDK build tools. Supports .NET (csproj), C++, Electron, Rust, Tauri, and cross-platform frameworks targeting Windows. | None |
-| [winmd-api-search](../skills/winmd-api-search/SKILL.md) | Find and explore Windows desktop APIs. Use when building features that need platform capabilities — camera, file access, notifications, UI controls, AI/ML, sensors, networking, etc. Discovers the right API for a task and retrieves full type details (methods, properties, events, enumeration values). | `LICENSE.txt` `scripts/Invoke-WinMdQuery.ps1` `scripts/Update-WinMdCache.ps1` `scripts/cache-generator/CacheGenerator.csproj` `scripts/cache-generator/Directory.Build.props` `scripts/cache-generator/Directory.Build.targets` `scripts/cache-generator/Directory.Packages.props` `scripts/cache-generator/Program.cs` |
+| [winmd-api-search](../skills/winmd-api-search/SKILL.md) | Find and explore Windows desktop APIs. Use when building features that need platform capabilities — camera, file access, notifications, UI controls, AI/ML, sensors, networking, etc. Discovers the right API for a task and retrieves full type details (methods, properties, events, enumeration values). | `LICENSE.txt` `scripts/Invoke-WinMdQuery.ps1` `scripts/Update-WinMdCache.ps1` `scripts/cache-generator` |
| [winui3-migration-guide](../skills/winui3-migration-guide/SKILL.md) | UWP-to-WinUI 3 migration reference. Maps legacy UWP APIs to correct Windows App SDK equivalents with before/after code snippets. Covers namespace changes, threading (CoreDispatcher to DispatcherQueue), windowing (CoreWindow to AppWindow), dialogs, pickers, sharing, printing, background tasks, and the most common Copilot code generation mistakes. | None |
| [workiq-copilot](../skills/workiq-copilot/SKILL.md) | Guides the Copilot CLI on how to use the WorkIQ CLI/MCP server to query Microsoft 365 Copilot data (emails, meetings, docs, Teams, people) for live context, summaries, and recommendations. | None |
| [write-coding-standards-from-file](../skills/write-coding-standards-from-file/SKILL.md) | Write a coding standards document for a project using the coding styles from the file(s) and/or folder(s) passed as arguments in the prompt. | None |
diff --git a/eng/constants.mjs b/eng/constants.mjs
index 50f85cb8..21a11053 100644
--- a/eng/constants.mjs
+++ b/eng/constants.mjs
@@ -27,7 +27,9 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-instructions) for guidelines on
pluginsSection: `## π Plugins
-Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.`,
+Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI or VS Code.
+
+> **Awesome Copilot is a default plugin marketplace** — no setup required in either Copilot CLI or VS Code.`,
pluginsUsage: `### How to Contribute
@@ -41,10 +43,13 @@ See [CONTRIBUTING.md](../CONTRIBUTING.md#adding-plugins) for guidelines on how t
- Each plugin includes agents and skills for specific workflows
- Plugins make it easy to adopt comprehensive toolkits for particular scenarios
-**Install Plugins:**
-- Use \\\`copilot plugin install @awesome-copilot\\\` to install a plugin
-- Or browse to the individual files to copy content manually
-- Plugins help you discover related customizations you might have missed`,
+**Find & Install in Copilot CLI:**
+- Browse the marketplace from within an interactive Copilot session: \\\`/plugin marketplace browse awesome-copilot\\\`
+- Install a plugin: \\\`copilot plugin install @awesome-copilot\\\`
+
+**Find & Install in VS Code:**
+- Open the Extensions search view and type \\\`@agentPlugins\\\` to browse available plugins
+- Or open the Command Palette and run \\\`Chat: Plugins\\\``,
featuredPluginsSection: `## π Featured Plugins
diff --git a/eng/generate-website-data.mjs b/eng/generate-website-data.mjs
index 79b28a1f..a2e2dca0 100644
--- a/eng/generate-website-data.mjs
+++ b/eng/generate-website-data.mjs
@@ -544,6 +544,63 @@ function generatePluginsData(gitDates) {
}
}
+ // Load external plugins from plugins/external.json
+ const externalJsonPath = path.join(PLUGINS_DIR, "external.json");
+ if (fs.existsSync(externalJsonPath)) {
+ try {
+ const externalPlugins = JSON.parse(
+ fs.readFileSync(externalJsonPath, "utf-8")
+ );
+ if (Array.isArray(externalPlugins)) {
+ let addedCount = 0;
+ for (const ext of externalPlugins) {
+ if (!ext.name || !ext.description) {
+ console.warn(
+ `Skipping external plugin with missing name/description`
+ );
+ continue;
+ }
+
+ // Skip if a local plugin with the same name already exists
+ if (plugins.some((p) => p.id === ext.name)) {
+ console.warn(
+ `Skipping external plugin "${ext.name}" β local plugin with same name exists`
+ );
+ continue;
+ }
+
+ const tags = ext.keywords || ext.tags || [];
+
+ plugins.push({
+ id: ext.name,
+ name: ext.name,
+ description: ext.description || "",
+ path: `plugins/${ext.name}`,
+ tags: tags,
+ itemCount: 0,
+ items: [],
+ external: true,
+ repository: ext.repository || null,
+ homepage: ext.homepage || null,
+ author: ext.author || null,
+ license: ext.license || null,
+ source: ext.source || null,
+ lastUpdated: null,
+ searchText: `${ext.name} ${ext.description || ""} ${tags.join(
+ " "
+ )} ${ext.author?.name || ""} ${ext.repository || ""}`.toLowerCase(),
+ });
+ addedCount++;
+ }
+ console.log(
+ ` β Loaded ${addedCount} external plugin(s)`
+ );
+ }
+ } catch (e) {
+ console.warn(`Failed to parse external plugins: ${e.message}`);
+ }
+ }
+
// Collect all unique tags
const allTags = [...new Set(plugins.flatMap((p) => p.tags))].sort();
@@ -900,6 +957,12 @@ async function main() {
`β Generated ${samplesData.totalRecipes} recipes in ${samplesData.totalCookbooks} cookbooks (${samplesData.filters.languages.length} languages, ${samplesData.filters.tags.length} tags)`
);
+ // Count contributors from .all-contributorsrc for manifest stats
+ const contributorsRcPath = path.join(ROOT_FOLDER, ".all-contributorsrc");
+ const contributorCount = fs.existsSync(contributorsRcPath)
+ ? (JSON.parse(fs.readFileSync(contributorsRcPath, "utf-8")).contributors || []).length
+ : 0;
+
const searchIndex = generateSearchIndex(
agents,
instructions,
@@ -967,6 +1030,7 @@ async function main() {
workflows: workflows.length,
plugins: plugins.length,
tools: tools.length,
+ contributors: contributorCount,
samples: samplesData.totalRecipes,
total: searchIndex.length,
},
diff --git a/eng/yaml-parser.mjs b/eng/yaml-parser.mjs
index 27aeaf0d..19bb3f73 100644
--- a/eng/yaml-parser.mjs
+++ b/eng/yaml-parser.mjs
@@ -145,10 +145,11 @@ function parseSkillMetadata(skillPath) {
// List bundled assets (all files except SKILL.md), recursing through subdirectories
const getAllFiles = (dirPath, arrayOfFiles = []) => {
const files = fs.readdirSync(dirPath);
+ const assetPaths = ['references', 'assets', 'scripts'];
files.forEach((file) => {
const filePath = path.join(dirPath, file);
- if (fs.statSync(filePath).isDirectory()) {
+ if (fs.statSync(filePath).isDirectory() && assetPaths.includes(file)) {
arrayOfFiles = getAllFiles(filePath, arrayOfFiles);
} else {
const relativePath = path.relative(skillPath, filePath);
diff --git a/instructions/scala2.instructions.md b/instructions/scala2.instructions.md
index 2a16663c..9dfbac9c 100644
--- a/instructions/scala2.instructions.md
+++ b/instructions/scala2.instructions.md
@@ -1,6 +1,6 @@
---
description: 'Scala 2.12/2.13 programming language coding conventions and best practices following Databricks style guide for functional programming, type safety, and production code quality.'
-applyTo: '**.scala, **/build.sbt, **/build.sc'
+applyTo: '**/*.scala, **/build.sbt, **/build.sc'
---
# Scala Best Practices
diff --git a/plugins/automate-this/.github/plugin/plugin.json b/plugins/automate-this/.github/plugin/plugin.json
new file mode 100644
index 00000000..0824ae3d
--- /dev/null
+++ b/plugins/automate-this/.github/plugin/plugin.json
@@ -0,0 +1,23 @@
+{
+ "name": "automate-this",
+ "description": "Record your screen doing a manual process, drop the video on your Desktop, and let Copilot CLI analyze it frame-by-frame to build working automation scripts. Supports narrated recordings with audio transcription.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Awesome Copilot Community"
+ },
+ "repository": "https://github.com/github/awesome-copilot",
+ "license": "MIT",
+ "keywords": [
+ "automation",
+ "screen-recording",
+ "workflow",
+ "video-analysis",
+ "process-automation",
+ "scripting",
+ "productivity",
+ "copilot-cli"
+ ],
+ "skills": [
+ "./skills/automate-this/"
+ ]
+}
diff --git a/plugins/automate-this/README.md b/plugins/automate-this/README.md
new file mode 100644
index 00000000..bbeb81cd
--- /dev/null
+++ b/plugins/automate-this/README.md
@@ -0,0 +1,87 @@
+# Automate This
+
+You know that thing you do every week — the fifteen-click, four-app, copy-paste-into-spreadsheet-and-email-it process that makes you want to throw your laptop into the ocean? Record yourself doing it once, hand the video to Copilot CLI, and let it write the script that does it for you.
+
+## How It Works
+
+1. **Record your screen.** Use QuickTime, OBS, Loom, or whatever you already have. Do the process exactly the way you normally do. If you want to talk through it while you record ("now I'm downloading this report because finance needs it every Monday"), even better — the plugin transcribes your narration and uses it to understand *why* you're doing each step, not just *what* you're clicking.
+
+2. **Drop the recording on your Desktop** (or anywhere — it just needs a file path).
+
+3. **Tell Copilot CLI to analyze it:**
+ ```
+ copilot
+ > /skills
+ > (select automate-this)
+ > Analyze ~/Desktop/weekly-report-process.mov and automate it
+ ```
+
+4. **Review what it found.** The plugin extracts frames from your video every couple of seconds, transcribes any narration, and reconstructs your process as a numbered step list. It asks you to confirm it got it right before proposing anything.
+
+5. **Pick your automation level.** You'll get up to three proposals ranging from "here's a one-liner that handles the worst part" to "here's a full script with scheduling and error handling." Each one uses tools you already have installed — no surprise dependency chains.
+
+## What Happens Under the Hood
+
+The plugin uses `ffmpeg` to pull frames and audio from your recording. If you narrated, it uses OpenAI's Whisper (running locally on your machine, not in the cloud) to transcribe what you said. The frames go to the AI model, which can read text on screen, identify applications, see what you're clicking, and follow the flow of your process.
+
+Before proposing automation, the plugin checks your environment — what's installed, what shell you use, what scripting languages are available — so it only suggests things you can actually run without spending an hour on setup.
+
+## Prerequisites
+
+- **ffmpeg** (required) — for extracting frames and audio from your recording
+ ```bash
+ brew install ffmpeg
+ ```
+
+- **Whisper** (optional) — only needed if your recording has narration
+ ```bash
+ pip install openai-whisper
+ ```
+ Or the C++ version: `brew install whisper-cpp`
+
+If Whisper isn't installed and your recording has audio, the plugin will let you know and offer to proceed with visual-only analysis.
+
+## Installation
+
+```bash
+copilot plugin install automate-this@awesome-copilot
+```
+
+## What Gets Analyzed Well
+
+The plugin works best when your recording clearly shows what you're doing. Some examples of processes people automate with this:
+
+- **Report generation** — downloading data from a dashboard, filtering it, formatting it, sending it to someone
+- **File organization** — sorting downloads into folders, renaming files by date, cleaning up duplicates
+- **Data entry** — copying information from one app and entering it into another
+- **Dev environment setup** — the sequence of commands and config changes you run every time you start a new project
+- **Repetitive terminal workflows** — running the same sequence of commands with different inputs
+- **Email-based workflows** — pulling data from emails, processing it, replying with results
+
+## What Stays Manual
+
+The plugin proposes automation for mechanical steps. It preserves human judgment β if your recording shows you pausing to review something or making a decision based on what you see, that step stays manual in the automation with a prompt for your input.
+
+## Example
+
+Say you record yourself doing a weekly task: you open a browser, navigate to an internal dashboard, download three CSV exports, open each in Excel, filter for rows marked "pending," combine them into one sheet, and email it to your manager.
+
+The plugin would reconstruct that process, confirm it with you, and propose something like:
+
+- **Tier 1:** A `curl` command that downloads all three CSVs in one shot (if the dashboard has direct download URLs), skipping the browser entirely.
+- **Tier 2:** A Python script that downloads the CSVs, filters for "pending" rows using pandas, merges them, and saves the result — ready to attach to an email.
+- **Tier 3:** The same script, plus it sends the email automatically via your mail client and runs every Monday at 8am via `launchd`.
+
+Each tier includes the working code, instructions to test it with a dry run, and how to undo anything if it goes sideways.
+
+## Privacy
+
+Frame extraction (ffmpeg) and audio transcription (Whisper) run entirely on your machine. Extracted frames and audio are written to a secure temporary directory and deleted when analysis is complete. The extracted frames and transcript are sent to the AI model powering your Copilot CLI session for analysis — this is the same model and data pipeline used by all Copilot interactions. No data is sent to additional third-party services beyond what Copilot already uses.
+
+## Source
+
+This plugin is part of [Awesome Copilot](https://github.com/github/awesome-copilot), a community-driven collection of GitHub Copilot extensions.
+
+## License
+
+MIT
diff --git a/plugins/awesome-copilot/.github/plugin/plugin.json b/plugins/awesome-copilot/.github/plugin/plugin.json
index 94f73969..3ebd4b48 100644
--- a/plugins/awesome-copilot/.github/plugin/plugin.json
+++ b/plugins/awesome-copilot/.github/plugin/plugin.json
@@ -1,7 +1,7 @@
{
"name": "awesome-copilot",
"description": "Meta prompts that help you discover and generate curated GitHub Copilot agents, instructions, prompts, and skills.",
- "version": "1.0.0",
+ "version": "1.1.0",
"author": {
"name": "Awesome Copilot Community"
},
@@ -20,7 +20,6 @@
"skills": [
"./skills/suggest-awesome-github-copilot-skills/",
"./skills/suggest-awesome-github-copilot-instructions/",
- "./skills/suggest-awesome-github-copilot-prompts/",
"./skills/suggest-awesome-github-copilot-agents/"
]
}
diff --git a/plugins/awesome-copilot/README.md b/plugins/awesome-copilot/README.md
index a61c7043..421e98f1 100644
--- a/plugins/awesome-copilot/README.md
+++ b/plugins/awesome-copilot/README.md
@@ -17,7 +17,6 @@ copilot plugin install awesome-copilot@awesome-copilot
|---------|-------------|
| `/awesome-copilot:suggest-awesome-github-copilot-collections` | Suggest relevant GitHub Copilot collections from the awesome-copilot repository based on current repository context and chat history, providing automatic download and installation of collection assets, and identifying outdated collection assets that need updates. |
| `/awesome-copilot:suggest-awesome-github-copilot-instructions` | Suggest relevant GitHub Copilot instruction files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing instructions in this repository, and identifying outdated instructions that need updates. |
-| `/awesome-copilot:suggest-awesome-github-copilot-prompts` | Suggest relevant GitHub Copilot prompt files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing prompts in this repository, and identifying outdated prompts that need updates. |
| `/awesome-copilot:suggest-awesome-github-copilot-agents` | Suggest relevant GitHub Copilot Custom Agents files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing custom agents in this repository, and identifying outdated agents that need updates. |
| `/awesome-copilot:suggest-awesome-github-copilot-skills` | Suggest relevant GitHub Copilot skills from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing skills in this repository, and identifying outdated skills that need updates. |
diff --git a/plugins/external.json b/plugins/external.json
index fe51488c..ec8c8073 100644
--- a/plugins/external.json
+++ b/plugins/external.json
@@ -1 +1,20 @@
-[]
+[
+ {
+ "name": "azure",
+ "description": "Microsoft Azure MCP Server and skills for cloud resource management, deployments, and Azure services. Manage your Azure infrastructure, monitor applications, and deploy resources directly from Copilot.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Microsoft",
+ "url": "https://www.microsoft.com"
+ },
+ "homepage": "https://github.com/microsoft/azure-skills",
+ "keywords": ["azure", "cloud", "infrastructure", "deployment", "microsoft", "devops"],
+ "license": "MIT",
+ "repository": "https://github.com/microsoft/github-copilot-for-azure",
+ "source": {
+ "source": "github",
+ "repo": "microsoft/azure-skills",
+ "path": ".github/plugins/azure-skills"
+ }
+ }
+]
diff --git a/plugins/flowstudio-power-automate/.github/plugin/plugin.json b/plugins/flowstudio-power-automate/.github/plugin/plugin.json
new file mode 100644
index 00000000..7c025d78
--- /dev/null
+++ b/plugins/flowstudio-power-automate/.github/plugin/plugin.json
@@ -0,0 +1,24 @@
+{
+ "name": "flowstudio-power-automate",
+ "description": "Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Includes skills for connecting to the MCP server, debugging failed flow runs, and building/deploying flows from natural language.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Awesome Copilot Community"
+ },
+ "repository": "https://github.com/github/awesome-copilot",
+ "license": "MIT",
+ "keywords": [
+ "power-automate",
+ "power-platform",
+ "flowstudio",
+ "mcp",
+ "model-context-protocol",
+ "cloud-flows",
+ "workflow-automation"
+ ],
+ "skills": [
+ "./skills/flowstudio-power-automate-mcp/",
+ "./skills/flowstudio-power-automate-debug/",
+ "./skills/flowstudio-power-automate-build/"
+ ]
+}
diff --git a/plugins/flowstudio-power-automate/README.md b/plugins/flowstudio-power-automate/README.md
new file mode 100644
index 00000000..4924c658
--- /dev/null
+++ b/plugins/flowstudio-power-automate/README.md
@@ -0,0 +1,37 @@
+# FlowStudio Power Automate Plugin
+
+Complete toolkit for managing Power Automate cloud flows via the FlowStudio MCP server. Connect, debug, and build/deploy flows using AI agents.
+
+Requires a FlowStudio MCP subscription — see https://flowstudio.app
+
+## Installation
+
+```bash
+# Using Copilot CLI
+copilot plugin install flowstudio-power-automate@awesome-copilot
+```
+
+## What's Included
+
+### Skills
+
+| Skill | Description |
+|-------|-------------|
+| `flowstudio-power-automate-mcp` | Core connection setup, tool discovery, and CRUD operations for Power Automate cloud flows via the FlowStudio MCP server. |
+| `flowstudio-power-automate-debug` | Step-by-step diagnostic workflow for investigating and fixing failing Power Automate cloud flow runs. |
+| `flowstudio-power-automate-build` | Build, scaffold, and deploy Power Automate cloud flows from natural language descriptions with bundled action pattern templates. |
+
+## Getting Started
+
+1. Install the plugin
+2. Subscribe to FlowStudio MCP at https://flowstudio.app
+3. Configure your MCP connection with the JWT from your workspace
+4. Ask Copilot to list your flows, debug a failure, or build a new flow
+
+## Source
+
+This plugin is part of [Awesome Copilot](https://github.com/github/awesome-copilot), a community-driven collection of GitHub Copilot extensions.
+
+## License
+
+MIT
diff --git a/plugins/gem-team/.github/plugin/plugin.json b/plugins/gem-team/.github/plugin/plugin.json
index 79b32afe..6f756168 100644
--- a/plugins/gem-team/.github/plugin/plugin.json
+++ b/plugins/gem-team/.github/plugin/plugin.json
@@ -1,7 +1,7 @@
{
"name": "gem-team",
- "description": "A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing.",
- "version": "1.2.0",
+ "description": "A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing with energetic team lead.",
+ "version": "1.2.1",
"author": {
"name": "Awesome Copilot Community"
},
diff --git a/plugins/gem-team/README.md b/plugins/gem-team/README.md
index 321c64a9..703437a0 100644
--- a/plugins/gem-team/README.md
+++ b/plugins/gem-team/README.md
@@ -15,11 +15,11 @@ copilot plugin install gem-team@awesome-copilot
| Agent | Description |
|-------|-------------|
-| `gem-orchestrator` | Coordinates multi-agent workflows, delegates tasks, synthesizes results via runSubagent |
+| `gem-orchestrator` | Team Lead - Coordinates multi-agent workflows with energetic announcements, delegates tasks, synthesizes results via runSubagent |
| `gem-researcher` | Research specialist: gathers codebase context, identifies relevant files/patterns, returns structured findings |
| `gem-planner` | Creates DAG-based plans with pre-mortem analysis and task decomposition from research findings |
| `gem-implementer` | Executes TDD code changes, ensures verification, maintains quality |
-| `gem-chrome-tester` | Automates browser testing, UI/UX validation via Chrome DevTools |
+| `gem-browser-tester` | Automates E2E scenarios with Chrome DevTools MCP, Playwright, Agent Browser. UI/UX validation using browser automation tools and visual verification techniques |
| `gem-devops` | Manages containers, CI/CD pipelines, and infrastructure deployment |
| `gem-reviewer` | Security gatekeeper for critical tasks — OWASP, secrets, compliance |
| `gem-documentation-writer` | Generates technical docs, diagrams, maintains code-documentation parity |
diff --git a/plugins/napkin/.github/plugin/plugin.json b/plugins/napkin/.github/plugin/plugin.json
new file mode 100644
index 00000000..2114c178
--- /dev/null
+++ b/plugins/napkin/.github/plugin/plugin.json
@@ -0,0 +1,25 @@
+{
+ "name": "napkin",
+ "description": "Visual whiteboard collaboration for Copilot CLI. Opens an interactive whiteboard in your browser where you can draw, sketch, and add sticky notes — then share everything back with Copilot. Copilot sees your drawings and responds with analysis, suggestions, and ideas.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Awesome Copilot Community"
+ },
+ "repository": "https://github.com/github/awesome-copilot",
+ "license": "MIT",
+ "keywords": [
+ "whiteboard",
+ "visual",
+ "collaboration",
+ "brainstorming",
+ "non-technical",
+ "drawing",
+ "sticky-notes",
+ "accessibility",
+ "copilot-cli",
+ "ux"
+ ],
+ "skills": [
+ "./skills/napkin/"
+ ]
+}
diff --git a/plugins/napkin/README.md b/plugins/napkin/README.md
new file mode 100644
index 00000000..4c8cb5c7
--- /dev/null
+++ b/plugins/napkin/README.md
@@ -0,0 +1,163 @@
+# Napkin — Visual Whiteboard for Copilot CLI
+
+A whiteboard that opens in your browser and connects to Copilot CLI. Draw, sketch, add sticky notes — then share everything back with Copilot. Copilot sees your drawings and responds with analysis, suggestions, and ideas.
+
+Built for people who aren't software developers: lawyers, PMs, business stakeholders, designers, writers — anyone who thinks better visually.
+
+## Installation
+
+Install the plugin directly from Copilot CLI:
+
+```bash
+copilot plugin install napkin@awesome-copilot
+```
+
+That's it. No other software, accounts, or setup required.
+
+### Verify It's Installed
+
+Run this in Copilot CLI to confirm the plugin is available:
+
+```
+/skills
+```
+
+You should see **napkin** in the list of available skills.
+
+## How to Use It
+
+### Step 1: Say "let's napkin"
+
+Open Copilot CLI and type `let's napkin` (or "open a napkin" or "start a whiteboard"). Copilot creates a whiteboard and opens it in your browser.
+
+
+
+### Step 2: Your whiteboard opens
+
+A clean whiteboard appears in your browser with simple drawing tools. If it's your first time, a quick welcome message explains how everything works.
+
+
+
+### Step 3: Draw and brainstorm
+
+Use the tools to sketch ideas, add sticky notes, draw arrows between concepts — whatever helps you think. This is your space.
+
+
+
+### Step 4: Share with Copilot
+
+When you're ready for Copilot's input, click the green **Share with Copilot** button. It saves a screenshot and copies your notes.
+
+
+
+### Step 5: Copilot responds
+
+Go back to your terminal and say `check the napkin`. Copilot looks at your whiteboard — including your drawings — and responds.
+
+
+
+## What's Included
+
+### Skill
+
+| Skill | Description |
+|-------|-------------|
+| `napkin` | Visual whiteboard collaboration — creates a whiteboard, interprets your drawings and notes, and responds conversationally |
+
+### Bundled Assets
+
+| Asset | Description |
+|-------|-------------|
+| `assets/napkin.html` | The whiteboard application — a single HTML file that opens in any browser, no installation needed |
+
+## Whiteboard Features
+
+| Feature | What it does |
+|---------|-------------|
+| **Freehand drawing** | Draw with a pen tool, just like on paper |
+| **Shapes** | Rectangles, circles, lines, and arrows — wobbly shapes snap to clean versions |
+| **Sticky notes** | Draggable, resizable, color-coded notes (yellow, pink, blue, green) |
+| **Text labels** | Click anywhere to type text directly on the canvas |
+| **Pan and zoom** | Hold spacebar and drag to move around; scroll to zoom |
+| **Undo/Redo** | Made a mistake? Ctrl+Z to undo, Ctrl+Shift+Z to redo |
+| **Auto-save** | Your work saves automatically — close the tab, come back later, it's still there |
+| **Share with Copilot** | One button exports a screenshot and copies your text content |
+
+## How Copilot Understands Your Drawings
+
+When you click "Share with Copilot," two things happen:
+
+1. **A screenshot is saved** (`napkin-snapshot.png` in your Downloads or Desktop folder). Copilot reads this image and can see everything — sketches, arrows, groupings, annotations, sticky notes, spatial layout.
+
+2. **Your text is copied to clipboard.** This gives Copilot the exact text from your sticky notes and labels, so nothing gets misread from the image.
+
+Copilot uses both to understand what you're thinking and respond as a collaborator — not a computer analyzing data, but a colleague looking at your whiteboard sketch.
+
+## What Can You Draw?
+
+Anything. But here are some things Copilot is especially good at interpreting:
+
+| What you draw | What Copilot understands |
+|---------------|------------------------|
+| Boxes connected by arrows | A process flow or workflow |
+| Items circled together | A group of related ideas |
+| Sticky notes in different colors | Categories or priorities |
+| Text with a line through it | Something rejected or deprioritized |
+| Stars or exclamation marks | High-priority items |
+| Items on opposite sides | A comparison or contrast |
+| A rough org chart | Reporting structure or team layout |
+
+## Keyboard Shortcuts
+
+You don't need these — everything works with mouse clicks. But if you want to work faster:
+
+| Key | Tool |
+|-----|------|
+| V | Select / move |
+| P | Pen (draw) |
+| R | Rectangle |
+| C | Circle |
+| A | Arrow |
+| L | Line |
+| T | Text |
+| N | New sticky note |
+| E | Eraser |
+| Delete | Delete selected item (not yet supported) |
+| Ctrl+Z | Undo |
+| Ctrl+Shift+Z | Redo |
+| Space + drag | Pan the canvas |
+| ? | Show help |
+
+## FAQ
+
+**Do I need to install anything besides the plugin?**
+No. The whiteboard is a single HTML file that opens in your browser. No apps, no accounts, no setup.
+
+**Does it work offline?**
+Yes. Everything runs locally in your browser. No internet connection needed for the whiteboard itself.
+
+**What browsers work?**
+Any modern browser — Chrome, Safari, Edge, Firefox. Chrome works best for the "copy to clipboard" feature.
+
+**Can I save my work?**
+Yes, automatically. The whiteboard saves to your browser's local storage every few seconds. Close the tab, come back later, your work is still there.
+
+**Can Copilot really understand my drawings?**
+Yes. The AI models powering Copilot CLI (Claude, GPT) can interpret images. They can see your sketches, read your handwriting-style text, understand spatial relationships, and interpret common visual patterns like flowcharts, groupings, and annotations.
+
+**What if I'm not a good artist?**
+Doesn't matter. The whiteboard snaps wobbly shapes to clean versions, and Copilot is trained to interpret rough sketches. Stick figures and messy arrows work just fine.
+
+**How do I start over?**
+Say "let's napkin" again in the CLI. Copilot will ask if you want to keep the existing whiteboard or start fresh.
+
+**What platforms are supported?**
+macOS, Linux, and Windows. The whiteboard runs in any browser. Clipboard integration uses platform-native tools (`pbpaste` on macOS, `xclip` on Linux, PowerShell on Windows).
+
+## Source
+
+This plugin is part of [Awesome Copilot](https://github.com/github/awesome-copilot), a community-driven collection of GitHub Copilot extensions.
+
+## License
+
+MIT
diff --git a/plugins/oracle-to-postgres-migration-expert/.github/plugin/plugin.json b/plugins/oracle-to-postgres-migration-expert/.github/plugin/plugin.json
new file mode 100644
index 00000000..8022d1e6
--- /dev/null
+++ b/plugins/oracle-to-postgres-migration-expert/.github/plugin/plugin.json
@@ -0,0 +1,32 @@
+{
+ "name": "oracle-to-postgres-migration-expert",
+ "description": "Expert agent for Oracle-to-PostgreSQL application migrations in .NET solutions. Performs code edits, runs commands, and invokes extension tools to migrate .NET/Oracle data access patterns to PostgreSQL.",
+ "version": "1.0.0",
+ "author": {
+ "name": "Awesome Copilot Community"
+ },
+ "repository": "https://github.com/github/awesome-copilot",
+ "license": "MIT",
+ "keywords": [
+ "oracle",
+ "postgresql",
+ "database-migration",
+ "dotnet",
+ "sql",
+ "migration",
+ "integration-testing",
+ "stored-procedures"
+ ],
+ "agents": [
+ "./agents/oracle-to-postgres-migration-expert.md"
+ ],
+ "skills": [
+ "./skills/creating-oracle-to-postgres-master-migration-plan/",
+ "./skills/creating-oracle-to-postgres-migration-bug-report/",
+ "./skills/creating-oracle-to-postgres-migration-integration-tests/",
+ "./skills/migrating-oracle-to-postgres-stored-procedures/",
+ "./skills/planning-oracle-to-postgres-migration-integration-testing/",
+ "./skills/reviewing-oracle-to-postgres-migration/",
+ "./skills/scaffolding-oracle-to-postgres-migration-test-project/"
+ ]
+}
diff --git a/plugins/oracle-to-postgres-migration-expert/README.md b/plugins/oracle-to-postgres-migration-expert/README.md
new file mode 100644
index 00000000..ffe832f2
--- /dev/null
+++ b/plugins/oracle-to-postgres-migration-expert/README.md
@@ -0,0 +1,117 @@
+# Oracle-to-PostgreSQL Migration Expert Plugin
+
+Expert agent for Oracle-to-PostgreSQL application migrations in .NET solutions. Performs code edits, runs commands, and invokes extension tools to migrate .NET/Oracle data access patterns to PostgreSQL.
+
+## Installation
+
+```bash
+# Using Copilot CLI
+copilot plugin install oracle-to-postgres-migration-expert@awesome-copilot
+```
+
+## What's Included
+
+### Agents
+
+| Agent | Description |
+|-------|-------------|
+| `Oracle-to-PostgreSQL Migration Expert` | Expert agent for Oracle→PostgreSQL migrations. Makes code edits and runs commands directly, educates users on migration concepts and pitfalls, and invokes extension tools on user confirmation. |
+
+### Skills
+
+| Skill | Description |
+|-------|-------------|
+| `reviewing-oracle-to-postgres-migration` | Identifies Oracle-to-PostgreSQL migration risks by cross-referencing code against known behavioral differences (empty strings, refcursors, type coercion, sorting, timestamps, concurrent transactions, etc.). |
+| `creating-oracle-to-postgres-master-migration-plan` | Discovers all projects in a .NET solution, classifies each for Oracle-to-PostgreSQL migration eligibility, and produces a persistent master migration plan. |
+| `migrating-oracle-to-postgres-stored-procedures` | Migrates Oracle PL/SQL stored procedures to PostgreSQL PL/pgSQL. Translates Oracle-specific syntax, preserves method signatures and type-anchored parameters, and applies `COLLATE "C"` for Oracle-compatible text sorting. |
+| `planning-oracle-to-postgres-migration-integration-testing` | Creates an integration testing plan for .NET data access artifacts, identifying repositories, DAOs, and service layers that need validation coverage. |
+| `scaffolding-oracle-to-postgres-migration-test-project` | Scaffolds an xUnit integration test project with a transaction-rollback base class and seed data manager for Oracle-to-PostgreSQL migration validation. |
+| `creating-oracle-to-postgres-migration-integration-tests` | Generates DB-agnostic xUnit integration tests with deterministic seed data that validate behavior consistency across both database systems. |
+| `creating-oracle-to-postgres-migration-bug-report` | Creates structured bug reports for defects discovered during Oracle-to-PostgreSQL migration validation, with severity, root cause, and remediation steps. |
+
+## Features
+
+### Educational Guidance
+
+The expert agent educates users throughout the migration journey:
+
+- **Migration Concepts**: Explains Oracle→PostgreSQL differences (empty strings vs NULL, NO_DATA_FOUND exceptions, sort order, TO_CHAR conversions, type coercion strictness, REF CURSOR handling, concurrent transactions, timestamp/timezone behavior)
+- **Pitfall Reference**: Surfaces insights from migration knowledge so users understand why changes are needed
+- **Best Practices**: Advises on minimizing changes, preserving logic, and ensuring schema immutability
+- **Workflow Guidance**: Presents a four-phase migration workflow as a guide users can follow at their own pace
+
+### Suggest-Then-Act Pattern
+
+The expert suggests actionable next steps and only proceeds with user confirmation:
+
+1. **Educate** on the migration topic and why it matters
+2. **Suggest** a recommended action with expected outcomes
+3. **Confirm** the user wants to proceed
+4. **Act** — make edits, run commands, or invoke extension tools directly
+5. **Summarize** what was produced and suggest the next step
+
+No autonomous chaining — the user controls the pace and sequence.
+
+## Migration Workflow
+
+The expert guides users through a four-phase workflow:
+
+**Phase 1 — Discovery & Planning**
+
+1. Create a master migration plan (classifies all projects in the solution)
+2. Set up Oracle and PostgreSQL DDL artifacts
+
+**Phase 2 — Code Migration** *(per project)*
+3. Migrate application codebase (via `ms-ossdata.vscode-pgsql` extension)
+4. Migrate stored procedures (Oracle PL/SQL → PostgreSQL PL/pgSQL)
+
+**Phase 3 — Validation** *(per project)*
+5. Plan integration testing
+6. Scaffold the xUnit test project
+7. Create integration tests
+8. Run tests against Oracle (baseline) and PostgreSQL (target)
+9. Validate test results
+10. Create bug reports for any failures
+
+**Phase 4 — Reporting**
+11. Generate final migration report (via `ms-ossdata.vscode-pgsql` extension)
+
+## Prerequisites
+
+- Visual Studio Code with GitHub Copilot
+- PostgreSQL Extension (`ms-ossdata.vscode-pgsql`) — required for application code migration and report generation
+- .NET solution with Oracle dependencies to migrate
+
+## Directory Structure
+
+The agent expects and creates the following structure in your repository:
+
+```
+.github/
+└── oracle-to-postgres-migration/
+    ├── Reports/
+    │   ├── Master Migration Plan.md
+    │   ├── {Project} Integration Testing Plan.md
+    │   ├── {Project} Application Migration Report.md
+    │   ├── BUG_REPORT_*.md
+    │   └── TestResults/
+    └── DDL/
+        ├── Oracle/ # Oracle DDL scripts (pre-migration)
+        └── Postgres/ # PostgreSQL DDL scripts (post-migration)
+```
+
+## Usage
+
+1. **Ask for Guidance**: Invoke the expert with a migration question or situation (e.g., *"How should I approach migrating my .NET solution to PostgreSQL?"* or *"What does Oracle do with empty strings that's different from PostgreSQL?"*)
+2. **Learn & Plan**: The expert explains concepts, surfaces pitfall insights, and presents recommended workflow steps
+3. **Choose Your Next Step**: Decide which task to tackle (master plan, code migration, testing, etc.)
+4. **Confirm and Act**: Tell the expert to proceed, and it makes edits, runs commands, or invokes extension tools directly
+5. **Review & Continue**: Examine the results and ask for the next step
+
+## Source
+
+This plugin is part of [Awesome Copilot](https://github.com/github/awesome-copilot), a community-driven collection of GitHub Copilot extensions.
+
+## License
+
+MIT
diff --git a/plugins/structured-autonomy/README.md b/plugins/structured-autonomy/README.md
index 2a69e99a..55abe83f 100644
--- a/plugins/structured-autonomy/README.md
+++ b/plugins/structured-autonomy/README.md
@@ -6,7 +6,7 @@ Premium planning, thrifty implementation
```bash
# Using Copilot CLI
-copilot plugin install github/awesome-copilot/plugins/structured-autonomy
+copilot plugin install structured-autonomy@awesome-copilot
```
## What's Included
diff --git a/skills/automate-this/SKILL.md b/skills/automate-this/SKILL.md
new file mode 100644
index 00000000..3d0cac53
--- /dev/null
+++ b/skills/automate-this/SKILL.md
@@ -0,0 +1,244 @@
+---
+name: automate-this
+description: 'Analyze a screen recording of a manual process and produce targeted, working automation scripts. Extracts frames and audio narration from video files, reconstructs the step-by-step workflow, and proposes automation at multiple complexity levels using tools already installed on the user machine.'
+---
+
+# Automate This
+
+Analyze a screen recording of a manual process and build working automation for it.
+
+The user records themselves doing something repetitive or tedious, hands you the video file, and you figure out what they're doing, why, and how to script it away.
+
+## Prerequisites Check
+
+Before analyzing any recording, verify the required tools are available. Run these checks silently and only surface problems:
+
+```bash
+command -v ffmpeg >/dev/null 2>&1 && ffmpeg -version 2>/dev/null | head -1 || echo "NO_FFMPEG"
+command -v whisper >/dev/null 2>&1 || command -v whisper-cpp >/dev/null 2>&1 || echo "NO_WHISPER"
+```
+
+- **ffmpeg is required.** If missing, tell the user: `brew install ffmpeg` (macOS) or the equivalent for their OS.
+- **Whisper is optional.** Only needed if the recording has narration. If missing AND the recording has an audio track, suggest: `pip install openai-whisper` or `brew install whisper-cpp`. If the user declines, proceed with visual analysis only.
+
+## Phase 1: Extract Content from the Recording
+
+Given a video file path (typically on `~/Desktop/`), extract both visual frames and audio:
+
+### Frame Extraction
+
+Extract frames at one frame every 2 seconds. This balances coverage with context window limits.
+
+```bash
+VIDEO_PATH="<path to the user's recording>"
+WORK_DIR=$(mktemp -d "${TMPDIR:-/tmp}/automate-this-XXXXXX")
+chmod 700 "$WORK_DIR"
+mkdir -p "$WORK_DIR/frames"
+ffmpeg -y -i "$VIDEO_PATH" -vf "fps=0.5" -q:v 2 -loglevel warning "$WORK_DIR/frames/frame_%04d.jpg"
+ls "$WORK_DIR/frames/" | wc -l
+```
+
+Use `$WORK_DIR` for all subsequent temp file paths in the session. The per-run directory with mode 0700 ensures extracted frames are only readable by the current user.
+
+If the recording is longer than 5 minutes (more than 150 frames), increase the interval to one frame every 4 seconds to stay within context limits. Tell the user you're sampling less frequently for longer recordings.
+
+### Audio Extraction and Transcription
+
+Check if the video has an audio track:
+
+```bash
+ffprobe -i "$VIDEO_PATH" -show_streams -select_streams a -loglevel error | head -5
+```
+
+If audio exists:
+
+```bash
+ffmpeg -y -i "$VIDEO_PATH" -ac 1 -ar 16000 -loglevel warning "$WORK_DIR/audio.wav"
+
+# Use whichever whisper binary is available
+if command -v whisper >/dev/null 2>&1; then
+ whisper "$WORK_DIR/audio.wav" --model small --language en --output_format txt --output_dir "$WORK_DIR/"
+ cat "$WORK_DIR/audio.txt"
+elif command -v whisper-cpp >/dev/null 2>&1; then
+ whisper-cpp -m "$(brew --prefix 2>/dev/null)/share/whisper-cpp/models/ggml-small.bin" -l en -f "$WORK_DIR/audio.wav" -otxt -of "$WORK_DIR/audio"
+ cat "$WORK_DIR/audio.txt"
+else
+ echo "NO_WHISPER"
+fi
+```
+
+If neither whisper binary is available and the recording has audio, inform the user they're missing narration context and ask if they want to install Whisper (`pip install openai-whisper` or `brew install whisper-cpp`) or proceed with visual-only analysis.
+
+## Phase 2: Reconstruct the Process
+
+Analyze the extracted frames (and transcript, if available) to build a structured understanding of what the user did. Work through the frames sequentially and identify:
+
+1. **Applications used** β Which apps appear in the recording? (browser, terminal, Finder, mail client, spreadsheet, IDE, etc.)
+2. **Sequence of actions** β What did the user do, in order? Click-by-click, step-by-step.
+3. **Data flow** β What information moved between steps? (copied text, downloaded files, form inputs, etc.)
+4. **Decision points** β Were there moments where the user paused, checked something, or made a choice?
+5. **Repetition patterns** β Did the user do the same thing multiple times with different inputs?
+6. **Pain points** β Where did the process look slow, error-prone, or tedious? The narration often reveals this directly ("I hate this part," "this always takes forever," "I have to do this for every single one").
+
+Present this reconstruction to the user as a numbered step list and ask them to confirm it's accurate before proposing automation. This is critical β a wrong understanding leads to useless automation.
+
+Format:
+
+```
+Here's what I see you doing in this recording:
+
+1. Open Chrome and navigate to [specific URL]
+2. Log in with credentials
+3. Click through to the reporting dashboard
+4. Download a CSV export
+5. Open the CSV in Excel
+6. Filter rows where column B is "pending"
+7. Copy those rows into a new spreadsheet
+8. Email the new spreadsheet to [recipient]
+
+You repeated steps 3-8 three times for different report types.
+
+[If narration was present]: You mentioned that the export step is the slowest
+part and that you do this every Monday morning.
+
+Does this match what you were doing? Anything I got wrong or missed?
+```
+
+Do NOT proceed to Phase 3 until the user confirms the reconstruction is accurate.
+
+## Phase 3: Environment Fingerprint
+
+Before proposing automation, understand what the user actually has to work with. Run these checks:
+
+```bash
+echo "=== OS ===" && uname -a
+echo "=== Shell ===" && echo $SHELL
+echo "=== Python ===" && { command -v python3 && python3 --version 2>&1; } || echo "not installed"
+echo "=== Node ===" && { command -v node && node --version 2>&1; } || echo "not installed"
+echo "=== Homebrew ===" && { command -v brew && echo "installed"; } || echo "not installed"
+echo "=== Common Tools ===" && for cmd in curl jq playwright selenium osascript automator crontab; do command -v $cmd >/dev/null 2>&1 && echo "$cmd: yes" || echo "$cmd: no"; done
+```
+
+Use this to constrain proposals to tools the user already has. Never propose automation that requires installing five new things unless the simpler path genuinely doesn't work.
+
+## Phase 4: Propose Automation
+
+Based on the reconstructed process and the user's environment, propose automation at up to three tiers. Not every process needs three tiers — use judgment.
+
+### Tier Structure
+
+**Tier 1 — Quick Win (under 5 minutes to set up)**
+The smallest useful automation. A shell alias, a one-liner, a keyboard shortcut, an AppleScript snippet. Automates the single most painful step, not the whole process.
+
+**Tier 2 — Script (under 30 minutes to set up)**
+A standalone script (bash, Python, or Node — whichever the user has) that automates the full process end-to-end. Handles common errors. Can be run manually when needed.
+
+**Tier 3 — Full Automation (under 2 hours to set up)**
+The script from Tier 2, plus: scheduled execution (cron, launchd, or GitHub Actions), logging, error notifications, and any necessary integration scaffolding (API keys, auth tokens, etc.).
+
+### Proposal Format
+
+For each tier, provide:
+
+```
+## Tier [N]: [Name]
+
+**What it automates:** [Which steps from the reconstruction]
+**What stays manual:** [Which steps still need a human]
+**Time savings:** [Estimated time saved per run, based on the recording length and repetition count]
+**Prerequisites:** [Anything needed that isn't already installed — ideally nothing]
+
+**How it works:**
+[2-3 sentence plain-English explanation]
+
+**The code:**
+[Complete, working, commented code — not pseudocode]
+
+**How to test it:**
+[Exact steps to verify it works, starting with a dry run if possible]
+
+**How to undo:**
+[How to reverse any changes if something goes wrong]
+```
+
+### Application-Specific Automation Strategies
+
+Use these strategies based on which applications appear in the recording:
+
+**Browser-based workflows:**
+- First choice: Check if the website has a public API. API calls are 10x more reliable than browser automation. Search for API documentation.
+- Second choice: `curl` or `wget` for simple HTTP requests with known endpoints.
+- Third choice: Playwright or Selenium for workflows that require clicking through UI. Prefer Playwright — it's faster and less flaky.
+- Look for patterns: if the user is downloading the same report from a dashboard repeatedly, it's almost certainly available via API or direct URL with query parameters.
+
+**Spreadsheet and data workflows:**
+- Python with pandas for data filtering, transformation, and aggregation.
+- If the user is doing simple column operations in Excel, a 5-line Python script replaces the entire manual process.
+- `csvkit` for quick command-line CSV manipulation without writing code.
+- If the output needs to stay in Excel format, use openpyxl.
+
+**Email workflows:**
+- macOS: `osascript` can control Mail.app to send emails with attachments.
+- Cross-platform: Python `smtplib` for sending, `imaplib` for reading.
+- If the email follows a template, generate the body from a template file with variable substitution.
+
+**File management workflows:**
+- Shell scripts for move/copy/rename patterns.
+- `find` + `xargs` for batch operations.
+- `fswatch` or `watchman` for triggered-on-change automation.
+- If the user is organizing files into folders by date or type, that's a 3-line shell script.
+
+**Terminal/CLI workflows:**
+- Shell aliases for frequently typed commands.
+- Shell functions for multi-step sequences.
+- Makefiles for project-specific task sets.
+- If the user ran the same command with different arguments, that's a loop.
+
+**macOS-specific workflows:**
+- AppleScript/JXA for controlling native apps (Mail, Calendar, Finder, Preview, etc.).
+- Shortcuts.app for simple multi-app workflows that don't need code.
+- `automator` for file-based workflows.
+- `launchd` plist files for scheduled tasks (prefer over cron on macOS).
+
+**Cross-application workflows (data moves between apps):**
+- Identify the data transfer points. Each transfer is an automation opportunity.
+- Clipboard-based transfers in the recording suggest the apps don't talk to each other — look for APIs, file-based handoffs, or direct integrations instead.
+- If the user copies from App A and pastes into App B, the automation should read from A's data source and write to B's input format directly.
+
+### Making Proposals Targeted
+
+Apply these principles to every proposal:
+
+1. **Automate the bottleneck first.** The narration and timing in the recording reveal which step is actually painful. A 30-second automation of the worst step beats a 2-hour automation of the whole process.
+
+2. **Match the user's skill level.** If the recording shows someone comfortable in a terminal, propose shell scripts. If it shows someone navigating GUIs, propose something with a simple trigger (double-click a script, run a Shortcut, or type one command).
+
+3. **Estimate real time savings.** Count the recording duration and multiply by how often they do it. "This recording is 4 minutes. You said you do this daily. That's 17 hours per year. Tier 1 cuts it to 30 seconds each time — you get 16 hours back."
+
+4. **Handle the 80% case.** The first version of the automation should cover the common path perfectly. Edge cases can be handled in Tier 3 or flagged for manual intervention.
+
+5. **Preserve human checkpoints.** If the recording shows the user reviewing or approving something mid-process, keep that as a manual step. Don't automate judgment calls.
+
+6. **Propose dry runs.** Every script should have a mode where it shows what it *would* do without doing it. `--dry-run` flags, preview output, or confirmation prompts before destructive actions.
+
+7. **Account for auth and secrets.** If the process involves logging in or using credentials, never hardcode them. Use environment variables, keychain access (macOS `security` command), or prompt for them at runtime.
+
+8. **Consider failure modes.** What happens if the website is down? If the file doesn't exist? If the format changes? Good proposals mention this and handle it.
+
+## Phase 5: Build and Test
+
+When the user picks a tier:
+
+1. Write the complete automation code to a file (suggest a sensible location — the user's project directory if one exists, or `~/Desktop/` otherwise).
+2. Walk through a dry run or test with the user watching.
+3. If the test works, show how to run it for real.
+4. If it fails, diagnose and fix — don't give up after one attempt.
+
+## Cleanup
+
+After analysis is complete (regardless of outcome), clean up extracted frames and audio:
+
+```bash
+rm -rf "$WORK_DIR"
+```
+
+Tell the user you're cleaning up temporary files so they know nothing is left behind.
diff --git a/skills/azure-devops-cli/SKILL.md b/skills/azure-devops-cli/SKILL.md
index a4a8fe58..c8e420fb 100644
--- a/skills/azure-devops-cli/SKILL.md
+++ b/skills/azure-devops-cli/SKILL.md
@@ -27,6 +27,7 @@ az extension add --name azure-devops
az devops login --organization https://dev.azure.com/{org} --token YOUR_PAT_TOKEN
# Set default organization and project (avoids repeating --org/--project)
+# Note: Legacy URL https://{org}.visualstudio.com should be replaced with https://dev.azure.com/{org}
az devops configure --defaults organization=https://dev.azure.com/{org} project={project}
# List current configuration
diff --git a/skills/cli-mastery/SKILL.md b/skills/cli-mastery/SKILL.md
new file mode 100644
index 00000000..5a6efc72
--- /dev/null
+++ b/skills/cli-mastery/SKILL.md
@@ -0,0 +1,43 @@
+---
+name: cli-mastery
+description: 'Interactive training for the GitHub Copilot CLI. Guided lessons, quizzes, scenario challenges, and a full reference covering slash commands, shortcuts, modes, agents, skills, MCP, and configuration. Say "cliexpert" to start.'
+metadata:
+ version: 1.2.0
+license: MIT
+---
+
+# Copilot CLI Mastery
+
+**UTILITY SKILL** — interactive Copilot CLI trainer.
+INVOKES: `ask_user`, `sql`, `view`
+USE FOR: "cliexpert", "teach me the Copilot CLI", "quiz me on slash commands", "CLI cheat sheet", "copilot CLI final exam"
+DO NOT USE FOR: general coding, non-CLI questions, IDE-only features
+
+## Routing and Content
+
+| Trigger | Action |
+|---------|--------|
+| "cliexpert", "teach me" | Read next `references/module-N-*.md`, teach |
+| "quiz me", "test me" | Read current module, 5+ questions via `ask_user` |
+| "scenario", "challenge" | Read `references/scenarios.md` |
+| "reference" | Read relevant module, summarize |
+| "final exam" | Read `references/final-exam.md` |
+
+Specific CLI questions get direct answers without loading references.
+Reference files in `references/` dir. Read on demand with `view`.
+
+## Behavior
+
+On first interaction, initialize progress tracking:
+```sql
+CREATE TABLE IF NOT EXISTS mastery_progress (key TEXT PRIMARY KEY, value TEXT);
+CREATE TABLE IF NOT EXISTS mastery_completed (module TEXT PRIMARY KEY, completed_at TEXT DEFAULT (datetime('now')));
+INSERT OR IGNORE INTO mastery_progress (key,value) VALUES ('xp','0'),('level','Newcomer'),('module','0');
+```
+XP: lesson +20, correct +15, perfect quiz +50, scenario +30.
+Levels: 0=Newcomer 100=Apprentice 250=Navigator 400=Practitioner 550=Specialist 700=Expert 850=Virtuoso 1000=Architect 1150=Grandmaster 1500=Wizard.
+Max XP from all content: 1600 (8 modules × 145 + 8 scenarios × 30 + final exam 200).
+
+When module counter exceeds 8 and user says "cliexpert", offer: scenarios, final exam, or review any module.
+
+Rules: `ask_user` with `choices` for ALL quizzes/scenarios. Show XP after correct answers. One concept at a time; offer quiz or review after each lesson.
diff --git a/skills/cli-mastery/references/final-exam.md b/skills/cli-mastery/references/final-exam.md
new file mode 100644
index 00000000..1a9f470f
--- /dev/null
+++ b/skills/cli-mastery/references/final-exam.md
@@ -0,0 +1,24 @@
+# Final Exam
+
+Present a 10-question comprehensive exam using `ask_user` with 4 choices each. Require 80%+ to pass. Vary the selection each time.
+
+## Question Bank
+
+1. Which command initializes Copilot CLI in a new project? → `/init`
+2. What shortcut cycles through modes? → `Shift+Tab`
+3. Where are repo-level custom agents stored? → `.github/agents/*.md`
+4. What does MCP stand for? → Model Context Protocol
+5. Which agent is safe to run in parallel? → `explore`
+6. How do you add a file to AI context? → `@filename` (e.g. `@src/auth.ts`)
+7. What file has the highest instruction precedence? → `CLAUDE.md` / `GEMINI.md` / `AGENTS.md` (git root + cwd)
+8. Which command compresses conversation history? → `/compact`
+9. Where is MCP configured at project level? → `.github/mcp-config.json`
+10. What does `--yolo` do? → Same as `--allow-all` (skip all confirmations)
+11. What does `/research` do? → Run a deep research investigation with sources
+12. Which shortcut opens input in $EDITOR? → `Ctrl+G`
+13. What does `/reset-allowed-tools` do? → Re-enables confirmation prompts
+14. Which command copies the last AI response to your clipboard? → `/copy`
+15. What does `/compact` do? → Summarizes conversation to free context
+
+On pass (80%+): Award "CLI Wizard" title, congratulate enthusiastically!
+On fail: Show which they got wrong, encourage retry.
diff --git a/skills/cli-mastery/references/module-1-slash-commands.md b/skills/cli-mastery/references/module-1-slash-commands.md
new file mode 100644
index 00000000..38b546d5
--- /dev/null
+++ b/skills/cli-mastery/references/module-1-slash-commands.md
@@ -0,0 +1,88 @@
+# Module 1: Slash Commands
+
+Teach these categories one at a time, with examples and "when to use" guidance.
+
+## Getting Started
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/login` | Authenticate with GitHub | First launch or expired session |
+| `/logout` | Sign out | Switching accounts |
+| `/help` | Show all commands | When lost |
+| `/exit` `/quit` | Exit CLI | Done working |
+| `/init` | Bootstrap copilot-instructions.md | New repo setup |
+| `/terminal-setup` | Configure multiline input | First-time setup |
+
+## Models & Agents
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/model` | Switch AI model | Need different capability/speed |
+| `/agent` | Browse/select agents | Delegate to specialist |
+| `/fleet` | Enable parallel subagents | Complex multi-part tasks |
+| `/tasks` | View background tasks | Check on running subagents |
+
+## Code & Review
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/diff` | Review changes in current dir | Before committing |
+| `/review` | Run code review agent | Get feedback on changes |
+| `/lsp` | Manage language servers | Need go-to-def, diagnostics |
+| `/ide` | Connect to IDE workspace | Want IDE integration |
+
+## Session & Context
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/context` | Show token usage visualization | Context getting large |
+| `/usage` | Display session metrics | Check premium request count |
+| `/compact` | Compress conversation history | Near context limit |
+| `/session` | Show session info | Need session details |
+| `/resume` | Switch to different session | Continue previous work |
+| `/rename` | Rename current session | Better organization |
+| `/share` | Export session to markdown/gist | Share with team |
+| `/copy` | Copy last response to clipboard | Grab AI output quickly |
+| `/clear` | Clear conversation history | Fresh start |
+
+## Permissions & Directories
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/allow-all` | Enable all permissions | Trusted environment, move fast |
+| `/add-dir` | Add trusted directory | Working across projects |
+| `/list-dirs` | Show allowed directories | Check access scope |
+| `/cwd` | Change working directory | Switch project context |
+| `/reset-allowed-tools` | Revoke tool approvals | Tighten security |
+
+## Configuration & Customization
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/instructions` | View active instruction files | Debug custom behavior |
+| `/experimental` | Toggle experimental features | Try autopilot mode |
+| `/theme` | Change terminal theme | Personalize |
+| `/streamer-mode` | Hide sensitive info | Livestreaming/demos |
+| `/changelog` | Show release notes | After update |
+| `/update` | Update CLI | New version available |
+| `/feedback` | Submit feedback | Report bug or request |
+
+## Extensibility
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/skills` | Manage skills | Browse/enable capabilities |
+| `/mcp` | Manage MCP servers | Add external tools |
+| `/plugin` | Manage plugins | Extend functionality |
+
+## Workflows & Research
+
+| Command | What it does | When to use |
+|---------|-------------|-------------|
+| `/plan` | Create implementation plan | Before complex changes |
+| `/research` | Run deep research investigation | Need thorough analysis with sources |
+| `/user` | Manage GitHub user list | Team context |
+
+## Quiz (5+ questions, use ask_user with 4 choices each)
+
+Ask "Which command would you use to [scenario]?" style questions.
diff --git a/skills/cli-mastery/references/module-2-keyboard-shortcuts.md b/skills/cli-mastery/references/module-2-keyboard-shortcuts.md
new file mode 100644
index 00000000..173232b5
--- /dev/null
+++ b/skills/cli-mastery/references/module-2-keyboard-shortcuts.md
@@ -0,0 +1,38 @@
+# Module 2: Keyboard Shortcuts
+
+## Navigation & Editing
+
+| Shortcut | Action |
+|----------|--------|
+| `@` | Mention files — include their contents as context |
+| `Ctrl+S` | Submit prompt while preserving input text |
+| `Shift+Tab` | Cycle modes: Interactive ↔ Plan |
+| `Ctrl+T` | Toggle model reasoning display |
+| `Ctrl+O` | Expand recent timeline (when no input) |
+| `Ctrl+E` | Expand all timeline (when no input) / move to end of line (when typing) |
+| `↑` `↓` | Navigate command history |
+| `!` | Execute shell command directly (bypass AI) |
+| `Esc` | Cancel current operation |
+| `Ctrl+C` | Cancel operation / clear input / exit |
+| `Ctrl+D` | Shutdown session |
+| `Ctrl+L` | Clear the screen |
+| `Ctrl+G` | Edit prompt in external editor ($EDITOR) |
+
+## Line Editing
+
+| Shortcut | Action |
+|----------|--------|
+| `Ctrl+A` | Move to beginning of line |
+| `Ctrl+H` | Delete previous character |
+| `Ctrl+W` | Delete previous word |
+| `Ctrl+U` | Delete from cursor to beginning of line |
+| `Ctrl+K` | Delete from cursor to end of line |
+| `Meta+←` `Meta+→` | Move cursor by word |
+
+## Pro tips to teach
+
+- `@` is THE most important shortcut — it's how you give precise context
+- `!git status` runs git directly without AI processing
+- `Shift+Tab` into Plan mode BEFORE complex tasks
+- `Ctrl+G` opens your $EDITOR for long prompts — game changer
+- `Ctrl+S` lets you iterate on a prompt without retyping
diff --git a/skills/cli-mastery/references/module-3-modes.md b/skills/cli-mastery/references/module-3-modes.md
new file mode 100644
index 00000000..93241f71
--- /dev/null
+++ b/skills/cli-mastery/references/module-3-modes.md
@@ -0,0 +1,33 @@
+# Module 3: Interaction Modes
+
+## Interactive Mode (default)
+
+- AI acts immediately on your prompts
+- Asks permission for risky operations
+- Best for: quick tasks, debugging, exploring code
+- 80% of your time will be here
+
+## Plan Mode (`Shift+Tab` or `/plan`)
+
+- AI creates a step-by-step plan FIRST
+- You review and approve before execution
+- Best for: complex refactoring, architecture changes, risky operations
+- Key insight: Use this when mistakes are expensive
+
+## Autopilot Mode (experimental, `/experimental`)
+
+- AI acts without asking for confirmation
+- Best for: trusted environments, long-running tasks
+- Use with caution — pair with `/allow-all` or `--yolo`
+
+## Mode Comparison
+
+| Feature | Interactive | Plan | Autopilot |
+|---------|------------|------|-----------|
+| Speed | Fast | Slower | Fastest |
+| Safety | Medium | Highest | Lowest |
+| Control | You approve each action | You approve the plan | Full AI autonomy |
+| Best for | Daily tasks | Complex changes | Repetitive/trusted work |
+| Switch | Default | Shift+Tab or /plan | /experimental (enables), then Shift+Tab |
+
+Teaching point: The right mode at the right time = 10x productivity.
diff --git a/skills/cli-mastery/references/module-4-agents.md b/skills/cli-mastery/references/module-4-agents.md
new file mode 100644
index 00000000..5c037cbb
--- /dev/null
+++ b/skills/cli-mastery/references/module-4-agents.md
@@ -0,0 +1,42 @@
+# Module 4: Agent System
+
+## Built-in Agents
+
+| Agent | Model | Best For | Key Trait |
+|-------|-------|----------|-----------|
+| `explore` | Haiku | Fast codebase Q&A | Read-only, <300 words, safe to parallelize |
+| `task` | Haiku | Running commands (tests, builds, lints) | Brief on success, verbose on failure |
+| `general-purpose` | Sonnet | Complex multi-step tasks | Full toolset, separate context window |
+| `code-review` | Sonnet | Analyzing code changes | Never modifies code, high signal-to-noise |
+
+## Custom Agents — define your own in Markdown
+
+| Level | Location | Scope |
+|-------|----------|-------|
+| Personal | `~/.copilot/agents/*.md` | All your projects |
+| Project | `.github/agents/*.md` | Everyone on this repo |
+| Organization | `.github-private/agents/` in org repo | Entire org |
+
+## Agent file anatomy
+
+```markdown
+---
+name: my-agent
+description: What this agent does
+tools:
+ - bash
+ - edit
+ - view
+---
+
+# Agent Instructions
+Your detailed behavior instructions here.
+```
+
+## Agent orchestration patterns
+
+1. **Fan-out exploration** — Launch multiple `explore` agents in parallel to answer different questions simultaneously
+2. **Pipeline** — `explore` → understand → `general-purpose` → implement → `code-review` → verify
+3. **Specialist handoff** — Identify task → `/agent` to pick specialist → review with `/fleet` or `/tasks`
+
+Key insight: The AI automatically delegates to subagents when appropriate.
diff --git a/skills/cli-mastery/references/module-5-skills.md b/skills/cli-mastery/references/module-5-skills.md
new file mode 100644
index 00000000..b9f920f0
--- /dev/null
+++ b/skills/cli-mastery/references/module-5-skills.md
@@ -0,0 +1,33 @@
+# Module 5: Skills System
+
+## What are skills?
+
+- Specialized capability packages the AI can invoke
+- Think of them as "expert modes" with domain-specific knowledge
+- Managed via `/skills` command
+
+## Skill locations
+
+| Level | Location |
+|-------|----------|
+| User | `~/.copilot/skills/<skill-name>/SKILL.md` |
+| Repo | `.github/skills/<skill-name>/SKILL.md` |
+| Org | Shared via org-level config |
+
+## Creating a custom skill
+
+1. Create the directory: `mkdir -p ~/.copilot/skills/my-skill/`
+2. Create `SKILL.md` with YAML frontmatter (`name`, `description`, optional `tools`)
+3. Write detailed instructions for the AI's behavior
+4. Verify with `/skills`
+
+## Skill design best practices
+
+- **Clear description** — helps the AI match tasks to your skill automatically
+- **Focused scope** — each skill should do ONE thing well
+- **Include instructions** — specify exactly how the skill should operate
+- **Test thoroughly** — use `/skills` to verify, then invoke and check results
+
+## Auto-matching
+
+When you describe a task, the AI checks if any skill matches and suggests using it.
diff --git a/skills/cli-mastery/references/module-6-mcp.md b/skills/cli-mastery/references/module-6-mcp.md
new file mode 100644
index 00000000..76675451
--- /dev/null
+++ b/skills/cli-mastery/references/module-6-mcp.md
@@ -0,0 +1,50 @@
+# Module 6: MCP Integration
+
+## What is MCP?
+
+- Model Context Protocol — a standard for connecting AI to external tools
+- Think of it as "USB ports for AI" — plug in any compatible tool
+- The GitHub MCP server is **built-in** (search repos, issues, PRs, actions)
+
+## Key commands
+
+| Command | What it does |
+|---------|-------------|
+| `/mcp` | List connected MCP servers |
+| `/mcp add <server>` | Add a new MCP server |
+
+## Popular MCP servers
+
+- `@modelcontextprotocol/server-postgres` — Query PostgreSQL databases
+- `@modelcontextprotocol/server-sqlite` — Query SQLite databases
+- `@modelcontextprotocol/server-filesystem` — Access local files with permissions
+- `@modelcontextprotocol/server-memory` — Persistent knowledge graph
+- `@modelcontextprotocol/server-puppeteer` — Browser automation
+
+## Configuration
+
+| Level | File |
+|-------|------|
+| User | `~/.copilot/mcp-config.json` |
+| Project | `.github/mcp-config.json` |
+
+## Config file format
+
+```json
+{
+ "mcpServers": {
+ "my-server": {
+ "command": "npx",
+ "args": ["@modelcontextprotocol/server-postgres", "{{env.DATABASE_URL}}"],
+ "env": { "NODE_ENV": "development" }
+ }
+ }
+}
+```
+
+## Security best practices
+
+- Never put credentials directly in config files
+- Use environment variable references: `{{env.SECRET}}`
+- Review MCP server source before using
+- Only connect servers you actually need
diff --git a/skills/cli-mastery/references/module-7-advanced.md b/skills/cli-mastery/references/module-7-advanced.md
new file mode 100644
index 00000000..22355dc8
--- /dev/null
+++ b/skills/cli-mastery/references/module-7-advanced.md
@@ -0,0 +1,38 @@
+# Module 7: Advanced Techniques
+
+1. **`@` file mentions** — Always give precise context, don't rely on the AI finding files
+ - `@src/auth.ts` — single file
+ - `@src/components/` — directory listing
+ - "Fix @src/auth.ts to match @tests/auth.test.ts" — multi-file context
+
+2. **`! shell bypass`** — `!git log --oneline -5` runs instantly, no AI overhead
+
+3. **`/research`** — Run a deep research investigation using GitHub search and web sources
+
+4. **`/resume` + `--continue`** — Session continuity across CLI launches
+
+5. **`/compact`** — Compress history when context gets large (auto at 95%)
+ - Check with `/context` first
+ - Best used at natural task boundaries
+ - Warning signs: AI contradicting earlier statements, token usage >80%
+
+6. **`/context`** — Visualize what's eating your token budget
+
+7. **Custom instructions precedence** (highest to lowest):
+ - `CLAUDE.md` / `GEMINI.md` / `AGENTS.md` (git root + cwd)
+ - `.github/instructions/**/*.instructions.md` (path-specific!)
+ - `.github/copilot-instructions.md`
+ - `~/.copilot/copilot-instructions.md`
+ - `COPILOT_CUSTOM_INSTRUCTIONS_DIRS` (additional directories via env var)
+
+8. **Path-specific instructions:**
+ - `.github/instructions/backend.instructions.md` with `applyTo: "src/api/**"`
+ - Different coding standards for different parts of the codebase
+
+9. **LSP config** — `~/.copilot/lsp-config.json` or `.github/lsp.json`
+
+10. **`/review`** — Get code review without leaving terminal
+
+11. **`--allow-all` / `--yolo`** — Full trust mode (use responsibly!)
+
+12. **`Ctrl+T`** — Watch the AI think (learn its reasoning patterns)
diff --git a/skills/cli-mastery/references/module-8-configuration.md b/skills/cli-mastery/references/module-8-configuration.md
new file mode 100644
index 00000000..27025797
--- /dev/null
+++ b/skills/cli-mastery/references/module-8-configuration.md
@@ -0,0 +1,34 @@
+# Module 8: Configuration
+
+## Key files
+
+| File | Purpose |
+|------|---------|
+| `~/.copilot/config.json` | Main settings (model, theme, logging, experimental flags) |
+| `~/.copilot/mcp-config.json` | MCP servers |
+| `~/.copilot/lsp-config.json` | Language servers (user-level) |
+| `.github/lsp.json` | Language servers (repo-level) |
+| `~/.copilot/copilot-instructions.md` | Global custom instructions |
+| `.github/copilot-instructions.md` | Repo-level custom instructions |
+
+## Environment variables
+
+| Variable | Purpose |
+|----------|---------|
+| `EDITOR` | Text editor for `Ctrl+G` (edit prompt in external editor) |
+| `COPILOT_LOG_LEVEL` | Logging verbosity (error/warn/info/debug/trace) |
+| `GH_TOKEN` / `GITHUB_TOKEN` | GitHub authentication token (checked in order) |
+| `COPILOT_CUSTOM_INSTRUCTIONS_DIRS` | Additional directories for custom instructions |
+
+## Permissions model
+
+- Default: confirmation required for edits, creates, shell commands
+- `/allow-all` or `--yolo`: skip all confirmations for the session
+- `/reset-allowed-tools`: re-enable confirmations
+- Directory allowlists, tool approval gates, MCP server trust
+
+## Logging levels
+
+error, warn, info, debug, trace (`COPILOT_LOG_LEVEL=debug copilot`)
+
+Use debug/trace for: MCP connection issues, tool failures, unexpected behavior, bug reports
diff --git a/skills/cli-mastery/references/scenarios.md b/skills/cli-mastery/references/scenarios.md
new file mode 100644
index 00000000..5a649c69
--- /dev/null
+++ b/skills/cli-mastery/references/scenarios.md
@@ -0,0 +1,44 @@
+# Scenario Challenges
+
+Present these as real-world situations. Ask the user what commands/shortcuts they'd use.
+Use `ask_user` with choices for each step.
+
+## Scenario 1: Hotfix Review Under Pressure
+> A production bug fix is ready. You need to inspect the diff, run code review, and keep sensitive data hidden because you're on a livestream.
+
+**Answer:** `/streamer-mode` → `/diff` → `/review @src/payment.ts`
+
+## Scenario 2: Context Window Rescue
+> Your session is huge and model quality is dropping. Keep continuity while shrinking noise.
+
+**Answer:** `/context` → `/compact` → `/resume` (or restart with `--continue`)
+
+## Scenario 3: Autonomous Refactor Sprint
+> You want an agent to execute a refactor with minimal prompts, but only after reviewing a plan and setting permissions.
+
+**Answer:** `Shift+Tab` (Plan mode) → validate plan → `/allow-all` → execute in Autopilot mode
+
+## Scenario 4: Enterprise Onboarding
+> Set up custom agents, repo instructions, and MCP integration for a new team repository.
+
+**Answer:** Add agent profiles to `.github/agents/`, verify `/instructions`, then `/mcp add`
+
+## Scenario 5: Power Editing Session
+> You're crafting a long prompt and need to edit quickly without losing context.
+
+**Answer:** `Ctrl+G` (open in editor), `Ctrl+A` (jump to start), `Ctrl+K` (trim)
+
+## Scenario 6: Agent Orchestration
+> You're leading a complex project: understand code, run tests, refactor, then review.
+
+**Answer:** `explore` agent (understand) → `task` agent (tests) → `general-purpose` (refactor) → `code-review` (verify)
+
+## Scenario 7: New Project Setup
+> You cloned a new repo and need to set up Copilot CLI for max productivity.
+
+**Answer:** `/init` → `/model` → `/mcp add` (if needed) → `Shift+Tab` to Plan mode for first task
+
+## Scenario 8: Production Safety
+> Switching from boilerplate work to production deployment scripts.
+
+**Answer:** `/reset-allowed-tools` → Plan mode → `/review` before every commit
diff --git a/skills/cloud-design-patterns/SKILL.md b/skills/cloud-design-patterns/SKILL.md
new file mode 100644
index 00000000..b9a17433
--- /dev/null
+++ b/skills/cloud-design-patterns/SKILL.md
@@ -0,0 +1,62 @@
+---
+name: cloud-design-patterns
+description: 'Cloud design patterns for distributed systems architecture covering 42 industry-standard patterns across reliability, performance, messaging, security, and deployment categories. Use when designing, reviewing, or implementing distributed system architectures.'
+---
+
+# Cloud Design Patterns
+
+Architects design workloads by integrating platform services, functionality, and code to meet both functional and nonfunctional requirements. To design effective workloads, you must understand these requirements and select topologies and methodologies that address the challenges of your workload's constraints. Cloud design patterns provide solutions to many common challenges.
+
+System design heavily relies on established design patterns. You can design infrastructure, code, and distributed systems by using a combination of these patterns. These patterns are crucial for building reliable, highly secure, cost-optimized, operationally efficient, and high-performing applications in the cloud.
+
+The following cloud design patterns are technology-agnostic, which makes them suitable for any distributed system. You can apply these patterns across Azure, other cloud platforms, on-premises setups, and hybrid environments.
+
+## How Cloud Design Patterns Enhance the Design Process
+
+Cloud workloads are vulnerable to the fallacies of distributed computing, which are common but incorrect assumptions about how distributed systems operate. Examples of these fallacies include:
+
+- The network is reliable.
+- Latency is zero.
+- Bandwidth is infinite.
+- The network is secure.
+- Topology doesn't change.
+- There's one administrator.
+- Component versioning is simple.
+- Observability implementation can be delayed.
+
+These misconceptions can result in flawed workload designs. Design patterns don't eliminate these misconceptions but help raise awareness, provide compensation strategies, and provide mitigations. Each cloud design pattern has trade-offs. Focus on why you should choose a specific pattern instead of how to implement it.
+
+---
+
+## References
+
+| Reference | When to load |
+|---|---|
+| [Reliability & Resilience Patterns](references/reliability-resilience.md) | Ambassador, Bulkhead, Circuit Breaker, Compensating Transaction, Retry, Health Endpoint Monitoring, Leader Election, Saga, Sequential Convoy |
+| [Performance Patterns](references/performance.md) | Async Request-Reply, Cache-Aside, CQRS, Index Table, Materialized View, Priority Queue, Queue-Based Load Leveling, Rate Limiting, Sharding, Throttling |
+| [Messaging & Integration Patterns](references/messaging-integration.md) | Choreography, Claim Check, Competing Consumers, Messaging Bridge, Pipes and Filters, Publisher-Subscriber, Scheduler Agent Supervisor |
+| [Architecture & Design Patterns](references/architecture-design.md) | Anti-Corruption Layer, Backends for Frontends, Gateway Aggregation/Offloading/Routing, Sidecar, Strangler Fig |
+| [Deployment & Operational Patterns](references/deployment-operational.md) | Compute Resource Consolidation, Deployment Stamps, External Configuration Store, Geode, Static Content Hosting |
+| [Security Patterns](references/security.md) | Federated Identity, Quarantine, Valet Key |
+| [Event-Driven Architecture Patterns](references/event-driven.md) | Event Sourcing |
+| [Best Practices & Pattern Selection](references/best-practices.md) | Selecting appropriate patterns, Well-Architected Framework alignment, documentation, monitoring |
+| [Azure Service Mappings](references/azure-service-mappings.md) | Common Azure services for each pattern category |
+
+---
+
+## Pattern Categories at a Glance
+
+| Category | Patterns | Focus |
+|---|---|---|
+| Reliability & Resilience | 9 patterns | Fault tolerance, self-healing, graceful degradation |
+| Performance | 10 patterns | Caching, scaling, load management, data optimization |
+| Messaging & Integration | 7 patterns | Decoupling, event-driven communication, workflow coordination |
+| Architecture & Design | 7 patterns | System boundaries, API gateways, migration strategies |
+| Deployment & Operational | 5 patterns | Infrastructure management, geo-distribution, configuration |
+| Security | 3 patterns | Identity, access control, content validation |
+| Event-Driven Architecture | 1 pattern | Event sourcing and audit trails |
+
+## External Links
+
+- [Cloud Design Patterns - Azure Architecture Center](https://learn.microsoft.com/azure/architecture/patterns/)
+- [Azure Well-Architected Framework](https://learn.microsoft.com/azure/architecture/framework/)
diff --git a/skills/cloud-design-patterns/references/architecture-design.md b/skills/cloud-design-patterns/references/architecture-design.md
new file mode 100644
index 00000000..490d4136
--- /dev/null
+++ b/skills/cloud-design-patterns/references/architecture-design.md
@@ -0,0 +1,127 @@
+# Architecture & Design Patterns
+
+## Anti-Corruption Layer Pattern
+
+**Problem**: New systems must integrate with legacy systems that use outdated models or technologies.
+
+**Solution**: Implement a façade or adapter layer between a modern application and a legacy system to prevent legacy constraints from affecting new design.
+
+**When to Use**:
+- Migrating from legacy systems incrementally
+- Integrating with third-party systems with different domain models
+- Protecting modern architectures from legacy constraints
+
+**Implementation Considerations**:
+- Create translation layer between domain models
+- Map between legacy and modern data structures
+- Isolate legacy system interfaces behind abstractions
+- Consider performance impact of translation
+- Plan for eventual removal if migration is complete
+
+## Backends for Frontends (BFF) Pattern
+
+**Problem**: A single backend may not optimally serve different client types.
+
+**Solution**: Create separate backend services to serve specific frontend applications or interfaces.
+
+**When to Use**:
+- Different client types (web, mobile, IoT) have different needs
+- Optimizing payload size and shape per client
+- Reducing coupling between frontend and shared backend
+
+**Implementation Considerations**:
+- Create one BFF per user experience or client type
+- Tailor API contracts to frontend needs
+- Avoid duplicating business logic across BFFs
+- Share common services between BFFs
+- Manage increased number of services
+
+## Gateway Aggregation Pattern
+
+**Problem**: Clients need data from multiple backend services.
+
+**Solution**: Use a gateway to aggregate multiple individual requests into a single request.
+
+**When to Use**:
+- Reducing chattiness between clients and backends
+- Combining data from multiple sources for a single view
+- Reducing latency by parallelizing backend calls
+
+**Implementation Considerations**:
+- API gateway aggregates responses from multiple services
+- Execute backend calls in parallel where possible
+- Handle partial failures appropriately
+- Consider caching of aggregated responses
+- Avoid creating a monolithic gateway
+
+## Gateway Offloading Pattern
+
+**Problem**: Shared functionality is duplicated across multiple services.
+
+**Solution**: Offload shared or specialized service functionality to a gateway proxy.
+
+**When to Use**:
+- Centralizing cross-cutting concerns (SSL, authentication, logging)
+- Simplifying service implementation
+- Standardizing shared functionality
+
+**Implementation Considerations**:
+- Offload SSL termination to gateway
+- Implement authentication and authorization at gateway
+- Handle rate limiting and throttling
+- Provide request/response logging
+- Avoid making gateway a bottleneck
+
+## Gateway Routing Pattern
+
+**Problem**: Clients need to access multiple services through a single endpoint.
+
+**Solution**: Route requests to multiple services using a single endpoint.
+
+**When to Use**:
+- Providing a single entry point for multiple services
+- Abstracting backend service topology from clients
+- Enabling service versioning and migration strategies
+
+**Implementation Considerations**:
+- Route based on URL path, headers, or query parameters
+- Support URL rewriting and transformation
+- Enable A/B testing and canary deployments
+- Implement health checks for backend services
+- Monitor routing performance
+
+## Sidecar Pattern
+
+**Problem**: Applications need auxiliary functionality without coupling.
+
+**Solution**: Deploy components of an application into a separate process or container to provide isolation and encapsulation.
+
+**When to Use**:
+- Adding functionality to applications without modifying them
+- Implementing cross-cutting concerns (logging, monitoring, security)
+- Supporting heterogeneous environments
+
+**Implementation Considerations**:
+- Deploy sidecar alongside main application
+- Share lifecycle, resources, and network with main application
+- Use for proxying, logging, configuration, or monitoring
+- Consider resource overhead of additional containers
+- Standardize sidecar implementations across services
+
+## Strangler Fig Pattern
+
+**Problem**: Legacy systems are risky to replace all at once.
+
+**Solution**: Incrementally migrate a legacy system by gradually replacing specific pieces of functionality with new applications and services.
+
+**When to Use**:
+- Modernizing legacy applications
+- Reducing risk of big-bang migrations
+- Enabling incremental business value delivery
+
+**Implementation Considerations**:
+- Identify functionality to migrate incrementally
+- Use facade or proxy to route between old and new
+- Migrate less risky components first
+- Run old and new systems in parallel initially
+- Plan for eventual decommissioning of legacy system
diff --git a/skills/cloud-design-patterns/references/azure-service-mappings.md b/skills/cloud-design-patterns/references/azure-service-mappings.md
new file mode 100644
index 00000000..9063b515
--- /dev/null
+++ b/skills/cloud-design-patterns/references/azure-service-mappings.md
@@ -0,0 +1,13 @@
+# Azure Service Mappings
+
+## Common Azure Services per Pattern
+
+- **Message Queue**: Azure Service Bus, Azure Storage Queue, Event Hubs
+- **Cache**: Azure Cache for Redis, Azure Front Door cache
+- **API Gateway**: Azure API Management, Azure Application Gateway
+- **Identity**: Azure AD, Azure AD B2C
+- **Configuration**: Azure App Configuration, Azure Key Vault
+- **Storage**: Azure Storage (Blob, Table, Queue), Azure Cosmos DB
+- **Compute**: Azure Functions, Azure Container Apps, Azure Kubernetes Service
+- **Event Streaming**: Azure Event Hubs, Azure Event Grid
+- **CDN**: Azure CDN, Azure Front Door
diff --git a/skills/cloud-design-patterns/references/best-practices.md b/skills/cloud-design-patterns/references/best-practices.md
new file mode 100644
index 00000000..f9151cfa
--- /dev/null
+++ b/skills/cloud-design-patterns/references/best-practices.md
@@ -0,0 +1,34 @@
+# Best Practices for Pattern Selection
+
+## Selecting Appropriate Patterns
+
+- **Understand the problem**: Clearly identify the specific challenge before choosing a pattern
+- **Consider trade-offs**: Each pattern introduces complexity and trade-offs
+- **Combine patterns**: Many patterns work better together (Circuit Breaker + Retry, CQRS + Event Sourcing)
+- **Start simple**: Don't over-engineer; apply patterns when the need is clear
+- **Platform-specific**: Consider Azure services that implement patterns natively
+
+## Well-Architected Framework Alignment
+
+Map selected patterns to Well-Architected Framework pillars:
+- **Reliability**: Circuit Breaker, Bulkhead, Retry, Health Endpoint Monitoring
+- **Security**: Federated Identity, Valet Key, Gateway Offloading, Quarantine
+- **Cost Optimization**: Compute Resource Consolidation, Static Content Hosting, Throttling
+- **Operational Excellence**: External Configuration Store, Sidecar, Deployment Stamps
+- **Performance Efficiency**: Cache-Aside, CQRS, Materialized View, Sharding
+
+## Pattern Documentation
+
+When implementing patterns, document:
+- Which pattern is being used and why
+- Trade-offs accepted
+- Configuration and tuning parameters
+- Monitoring and observability approach
+- Failure scenarios and recovery procedures
+
+## Monitoring Patterns
+
+- Implement comprehensive observability for all patterns
+- Track pattern-specific metrics (circuit breaker state, cache hit ratio, queue depth)
+- Use distributed tracing for patterns involving multiple services
+- Alert on pattern degradation (circuit frequently open, high retry rates)
diff --git a/skills/cloud-design-patterns/references/deployment-operational.md b/skills/cloud-design-patterns/references/deployment-operational.md
new file mode 100644
index 00000000..f30bafe7
--- /dev/null
+++ b/skills/cloud-design-patterns/references/deployment-operational.md
@@ -0,0 +1,91 @@
+# Deployment & Operational Patterns
+
+## Compute Resource Consolidation Pattern
+
+**Problem**: Multiple tasks consume resources inefficiently when isolated.
+
+**Solution**: Consolidate multiple tasks or operations into a single computational unit.
+
+**When to Use**:
+- Reducing infrastructure costs
+- Improving resource utilization
+- Simplifying deployment and management
+
+**Implementation Considerations**:
+- Group related tasks with similar scaling requirements
+- Use containers or microservices hosting
+- Monitor resource usage per task
+- Ensure isolation where needed for security/reliability
+- Balance between consolidation and failure isolation
+
+## Deployment Stamps Pattern
+
+**Problem**: Applications need to scale across regions or customer segments.
+
+**Solution**: Deploy multiple independent copies of application components (stamps), including data stores, to serve different regions or customer segments.
+
+**When to Use**:
+- Scaling beyond single stamp limits
+- Providing regional data residency
+- Isolating tenants for security or performance
+
+**Implementation Considerations**:
+- Each stamp is a complete, self-contained deployment
+- Deploy stamps across regions for geo-distribution
+- Route requests to appropriate stamp
+- Manage stamp deployments consistently (IaC)
+- Plan for stamp capacity and when to add new stamps
+
+## External Configuration Store Pattern
+
+**Problem**: Application configuration is embedded in deployment packages.
+
+**Solution**: Move configuration information out of the application deployment package to a centralized location.
+
+**When to Use**:
+- Managing configuration across multiple environments
+- Updating configuration without redeployment
+- Sharing configuration across multiple applications
+
+**Implementation Considerations**:
+- Use Azure App Configuration, Key Vault, or similar services
+- Implement configuration change notifications
+- Cache configuration locally to reduce dependencies
+- Secure sensitive configuration (connection strings, secrets)
+- Version configuration changes
+
+## Geode Pattern
+
+**Problem**: Users in different regions experience high latency.
+
+**Solution**: Deploy backend services into a set of geographical nodes, each of which can service any client request in any region.
+
+**When to Use**:
+- Reducing latency for globally distributed users
+- Providing high availability across regions
+- Implementing active-active geo-distribution
+
+**Implementation Considerations**:
+- Deploy application instances in multiple regions
+- Replicate data globally (consider consistency implications)
+- Route users to nearest healthy region
+- Implement conflict resolution for multi-master writes
+- Monitor regional health and performance
+
+## Static Content Hosting Pattern
+
+**Problem**: Serving static content from compute instances is inefficient.
+
+**Solution**: Deploy static content to a cloud-based storage service that can deliver content directly to the client.
+
+**When to Use**:
+- Hosting images, videos, CSS, JavaScript files
+- Reducing load on web servers
+- Improving content delivery performance
+
+**Implementation Considerations**:
+- Use blob storage, CDN, or static website hosting
+- Enable CORS for cross-origin access
+- Implement caching headers appropriately
+- Use CDN for global content distribution
+- Secure content with SAS tokens if needed
diff --git a/skills/cloud-design-patterns/references/event-driven.md b/skills/cloud-design-patterns/references/event-driven.md
new file mode 100644
index 00000000..f0e14276
--- /dev/null
+++ b/skills/cloud-design-patterns/references/event-driven.md
@@ -0,0 +1,21 @@
+# Event-Driven Architecture Patterns
+
+## Event Sourcing Pattern
+
+**Problem**: Need complete audit trail of all changes to application state.
+
+**Solution**: Use an append-only store to record the full series of events that describe actions taken on data in a domain.
+
+**When to Use**:
+- Requiring complete audit trail
+- Implementing temporal queries (point-in-time state)
+- Supporting event replay and debugging
+- Implementing CQRS with eventual consistency
+
+**Implementation Considerations**:
+- Store events in append-only log
+- Rebuild current state by replaying events
+- Implement event versioning strategy
+- Handle event schema evolution
+- Consider storage growth over time
+- Implement snapshots for performance
diff --git a/skills/cloud-design-patterns/references/messaging-integration.md b/skills/cloud-design-patterns/references/messaging-integration.md
new file mode 100644
index 00000000..1da7cf19
--- /dev/null
+++ b/skills/cloud-design-patterns/references/messaging-integration.md
@@ -0,0 +1,127 @@
+# Messaging & Integration Patterns
+
+## Choreography Pattern
+
+**Problem**: Central orchestrators create coupling and single points of failure.
+
+**Solution**: Let individual services decide when and how a business operation is processed through event-driven collaboration.
+
+**When to Use**:
+- Loosely coupled microservices architectures
+- Event-driven systems
+- Avoiding central orchestration bottlenecks
+
+**Implementation Considerations**:
+- Use publish-subscribe messaging for event distribution
+- Each service publishes domain events and subscribes to relevant events
+- Implement saga pattern for complex workflows
+- Ensure idempotency as events may be delivered multiple times
+- Provide comprehensive logging and distributed tracing
+
+## Claim Check Pattern
+
+**Problem**: Large messages can overwhelm message infrastructure.
+
+**Solution**: Split a large message into a claim check (reference) and a payload stored separately.
+
+**When to Use**:
+- Messages exceed messaging system size limits
+- Reducing message bus load
+- Handling large file transfers asynchronously
+
+**Implementation Considerations**:
+- Store payload in blob storage or database
+- Send only reference/URI through message bus
+- Implement expiration policies for stored payloads
+- Handle access control for payload storage
+- Consider costs of storage vs message transmission
+
+## Competing Consumers Pattern
+
+**Problem**: Single consumer may not keep up with message volume.
+
+**Solution**: Enable multiple concurrent consumers to process messages from the same messaging channel.
+
+**When to Use**:
+- High message throughput requirements
+- Scaling message processing horizontally
+- Load balancing across multiple instances
+
+**Implementation Considerations**:
+- Ensure messages can be processed in any order
+- Use competing consumer queues (Service Bus, RabbitMQ)
+- Implement idempotency for message handlers
+- Handle poison messages with retry and dead-letter policies
+- Scale consumer count based on queue depth
+
+## Messaging Bridge Pattern
+
+**Problem**: Different systems use incompatible messaging technologies.
+
+**Solution**: Build an intermediary to enable communication between messaging systems that are otherwise incompatible.
+
+**When to Use**:
+- Migrating between messaging systems
+- Integrating with legacy systems
+- Connecting cloud and on-premises messaging
+
+**Implementation Considerations**:
+- Transform message formats between systems
+- Handle protocol differences
+- Maintain message ordering if required
+- Implement error handling and retry logic
+- Monitor bridge performance and health
+
+## Pipes and Filters Pattern
+
+**Problem**: Complex processing tasks are difficult to maintain and reuse.
+
+**Solution**: Break down a task that performs complex processing into a series of separate, reusable elements (filters) connected by channels (pipes).
+
+**When to Use**:
+- Processing data streams with multiple transformations
+- Building reusable processing components
+- Enabling parallel processing of independent operations
+
+**Implementation Considerations**:
+- Each filter performs a single transformation
+- Connect filters using message queues or streams
+- Enable parallel execution where possible
+- Handle errors within filters or at pipeline level
+- Support filter composition and reordering
+
+## Publisher-Subscriber Pattern
+
+**Problem**: Applications need to broadcast information to multiple interested consumers.
+
+**Solution**: Enable an application to announce events to multiple consumers asynchronously, without coupling senders to receivers.
+
+**When to Use**:
+- Broadcasting events to multiple interested parties
+- Decoupling event producers from consumers
+- Implementing event-driven architectures
+
+**Implementation Considerations**:
+- Use topic-based or content-based subscriptions
+- Ensure message delivery guarantees match requirements
+- Implement subscription filters for selective consumption
+- Handle consumer failures without affecting publishers
+- Consider message ordering requirements per subscriber
+
+## Scheduler Agent Supervisor Pattern
+
+**Problem**: Distributed actions need coordination and monitoring.
+
+**Solution**: Coordinate a set of actions across distributed services and resources with a supervisor that monitors and manages the workflow.
+
+**When to Use**:
+- Orchestrating multi-step workflows
+- Coordinating distributed transactions
+- Implementing resilient long-running processes
+
+**Implementation Considerations**:
+- Scheduler dispatches tasks to agents
+- Agents perform work and report status
+- Supervisor monitors progress and handles failures
+- Implement compensation logic for failed steps
+- Maintain state for workflow recovery
diff --git a/skills/cloud-design-patterns/references/performance.md b/skills/cloud-design-patterns/references/performance.md
new file mode 100644
index 00000000..db79cdca
--- /dev/null
+++ b/skills/cloud-design-patterns/references/performance.md
@@ -0,0 +1,180 @@
+# Performance Patterns
+
+## Asynchronous Request-Reply Pattern
+
+**Problem**: Client applications expect synchronous responses, but back-end processing is asynchronous.
+
+**Solution**: Decouple back-end processing from a front-end host where back-end processing must be asynchronous, but the front end requires a clear response.
+
+**When to Use**:
+- Long-running back-end operations
+- Client applications can't wait for synchronous responses
+- Offloading compute-intensive operations from web tier
+
+**Implementation Considerations**:
+- Return HTTP 202 (Accepted) with location header for status checking
+- Implement status endpoint for clients to poll
+- Consider webhooks for callback notifications
+- Use correlation IDs to track requests
+- Implement timeouts for long-running operations
+
+## Cache-Aside Pattern
+
+**Problem**: Applications repeatedly access the same data from a data store.
+
+**Solution**: Load data on demand into a cache from a data store when needed.
+
+**When to Use**:
+- Frequently accessed, read-heavy data
+- Data that changes infrequently
+- Reducing load on primary data store
+
+**Implementation Considerations**:
+- Check cache before accessing data store
+- Load data into cache on cache miss (lazy loading)
+- Set appropriate cache expiration policies
+- Implement cache invalidation strategies
+- Handle cache failures gracefully (fallback to data store)
+- Consider cache coherency in distributed scenarios
+
+## CQRS (Command Query Responsibility Segregation) Pattern
+
+**Problem**: Read and write workloads have different requirements and scaling needs.
+
+**Solution**: Separate operations that read data from those that update data by using distinct interfaces.
+
+**When to Use**:
+- Read and write workloads have vastly different performance characteristics
+- Different teams work on read and write sides
+- Need to prevent merge conflicts in collaborative scenarios
+- Complex business logic differs between reads and writes
+
+**Implementation Considerations**:
+- Separate read and write models
+- Use event sourcing to synchronize models
+- Scale read and write sides independently
+- Consider eventual consistency implications
+- Implement appropriate security for commands vs queries
+
+## Index Table Pattern
+
+**Problem**: Queries frequently reference fields that aren't indexed efficiently.
+
+**Solution**: Create indexes over the fields in data stores that queries frequently reference.
+
+**When to Use**:
+- Improving query performance
+- Supporting multiple query patterns
+- Working with NoSQL databases without native indexing
+
+**Implementation Considerations**:
+- Create separate tables/collections optimized for specific queries
+- Maintain indexes asynchronously using events or triggers
+- Consider storage overhead of duplicate data
+- Handle index update failures and inconsistencies
+
+## Materialized View Pattern
+
+**Problem**: Data is poorly formatted for required query operations.
+
+**Solution**: Generate prepopulated views over the data in one or more data stores when the data isn't ideally formatted for query operations.
+
+**When to Use**:
+- Complex queries over normalized data
+- Improving read performance for complex joins/aggregations
+- Supporting multiple query patterns efficiently
+
+**Implementation Considerations**:
+- Refresh views asynchronously using background jobs or triggers
+- Consider staleness tolerance for materialized data
+- Balance between storage cost and query performance
+- Implement incremental refresh where possible
+
+## Priority Queue Pattern
+
+**Problem**: Some requests need faster processing than others.
+
+**Solution**: Prioritize requests sent to services so that requests with a higher priority are processed more quickly.
+
+**When to Use**:
+- Providing different service levels to different customers
+- Processing critical operations before less important ones
+- Managing mixed workloads with varying importance
+
+**Implementation Considerations**:
+- Use message priority metadata
+- Implement multiple queues for different priority levels
+- Prevent starvation of low-priority messages
+- Monitor queue depths and processing times per priority
+
+## Queue-Based Load Leveling Pattern
+
+**Problem**: Intermittent heavy loads can overwhelm services.
+
+**Solution**: Use a queue as a buffer between a task and a service to smooth intermittent heavy loads.
+
+**When to Use**:
+- Protecting services from traffic spikes
+- Decoupling producers and consumers
+- Enabling asynchronous processing
+
+**Implementation Considerations**:
+- Choose appropriate queue technology (Azure Storage Queue, Service Bus, etc.)
+- Monitor queue length to detect saturation
+- Implement auto-scaling based on queue depth
+- Set appropriate message time-to-live (TTL)
+- Handle poison messages with dead-letter queues
+
+## Rate Limiting Pattern
+
+**Problem**: Service consumption must be controlled to prevent resource exhaustion.
+
+**Solution**: Control the consumption of resources by applications, tenants, or services to prevent resource exhaustion and throttling.
+
+**When to Use**:
+- Protecting backend services from overload
+- Implementing fair usage policies
+- Preventing one tenant from monopolizing resources
+
+**Implementation Considerations**:
+- Implement token bucket, leaky bucket, or fixed window algorithms
+- Return HTTP 429 (Too Many Requests) when limits exceeded
+- Provide Retry-After headers to clients
+- Consider different limits for different clients/tiers
+- Make limits configurable and monitorable
+
+## Sharding Pattern
+
+**Problem**: A single data store may have limitations in storage capacity and performance.
+
+**Solution**: Divide a data store into a set of horizontal partitions or shards.
+
+**When to Use**:
+- Scaling beyond single database limits
+- Improving query performance by reducing dataset size
+- Distributing load across multiple databases
+
+**Implementation Considerations**:
+- Choose appropriate shard key (hash, range, or list-based)
+- Avoid hot partitions by selecting balanced shard keys
+- Handle cross-shard queries carefully
+- Plan for shard rebalancing and splitting
+- Consider operational complexity of managing multiple shards
+
+## Throttling Pattern
+
+**Problem**: Resource consumption must be limited to prevent system overload.
+
+**Solution**: Control the consumption of resources used by an application, tenant, or service.
+
+**When to Use**:
+- Ensuring system operates within defined capacity
+- Preventing resource exhaustion during peak load
+- Enforcing SLA-based resource allocation
+
+**Implementation Considerations**:
+- Implement at API gateway or service level
+- Use different strategies: reject requests, queue, or degrade service
+- Return appropriate HTTP status codes (429, 503)
+- Provide clear feedback to clients about throttling
+- Monitor throttling metrics to adjust capacity
diff --git a/skills/cloud-design-patterns/references/reliability-resilience.md b/skills/cloud-design-patterns/references/reliability-resilience.md
new file mode 100644
index 00000000..208b92d8
--- /dev/null
+++ b/skills/cloud-design-patterns/references/reliability-resilience.md
@@ -0,0 +1,156 @@
+# Reliability & Resilience Patterns
+
+## Ambassador Pattern
+
+**Problem**: Services need proxy functionality for network requests (logging, monitoring, routing, security).
+
+**Solution**: Create helper services that send network requests on behalf of a consumer service or application.
+
+**When to Use**:
+- Offloading common client connectivity tasks (monitoring, logging, routing)
+- Supporting legacy applications that can't be easily modified
+- Implementing retry logic, circuit breakers, or timeout handling for remote services
+
+**Implementation Considerations**:
+- Deploy ambassador as a sidecar process or container with the application
+- Consider network latency introduced by the proxy layer
+- Ensure ambassador doesn't become a single point of failure
+
+## Bulkhead Pattern
+
+**Problem**: A failure in one component can cascade and affect the entire system.
+
+**Solution**: Isolate elements of an application into pools so that if one fails, the others continue to function.
+
+**When to Use**:
+- Isolating critical resources from less critical ones
+- Preventing resource exhaustion in one area from affecting others
+- Partitioning consumers and resources to improve availability
+
+**Implementation Considerations**:
+- Separate connection pools for different backends
+- Partition service instances across different groups
+- Use resource limits (CPU, memory, threads) per partition
+- Monitor bulkhead health and capacity
+
+## Circuit Breaker Pattern
+
+**Problem**: Applications can waste resources attempting operations that are likely to fail.
+
+**Solution**: Prevent an application from repeatedly trying to execute an operation that's likely to fail, allowing it to continue without waiting for the fault to be fixed.
+
+**When to Use**:
+- Protecting against cascading failures
+- Failing fast when a remote service is unavailable
+- Providing fallback behavior when services are down
+
+**Implementation Considerations**:
+- Define threshold for triggering circuit breaker (failures/time window)
+- Implement three states: Closed, Open, Half-Open
+- Set appropriate timeout values for operations
+- Log state transitions and failures for diagnostics
+- Provide meaningful error messages to clients
+
+## Compensating Transaction Pattern
+
+**Problem**: Distributed transactions are difficult to implement and may not be supported.
+
+**Solution**: Undo the work performed by a sequence of steps that collectively form an eventually consistent operation.
+
+**When to Use**:
+- Implementing eventual consistency in distributed systems
+- Rolling back multi-step business processes that fail partway through
+- Handling long-running transactions that can't use 2PC
+
+**Implementation Considerations**:
+- Define compensating logic for each step in transaction
+- Store enough state to undo operations
+- Handle idempotency for compensation operations
+- Consider ordering dependencies between compensating actions
+
+## Retry Pattern
+
+**Problem**: Transient failures are common in distributed systems.
+
+**Solution**: Enable applications to handle anticipated temporary failures by retrying failed operations.
+
+**When to Use**:
+- Handling transient faults (network glitches, temporary unavailability)
+- Operations expected to succeed after a brief delay
+- Non-idempotent operations with careful consideration
+
+**Implementation Considerations**:
+- Implement exponential backoff between retries
+- Set maximum retry count to avoid infinite loops
+- Distinguish between transient and permanent failures
+- Ensure operations are idempotent or track retry attempts
+- Consider jitter to avoid thundering herd problem
+
+## Health Endpoint Monitoring Pattern
+
+**Problem**: External tools need to verify system health and availability.
+
+**Solution**: Implement functional checks in an application that external tools can access through exposed endpoints at regular intervals.
+
+**When to Use**:
+- Monitoring web applications and back-end services
+- Implementing readiness and liveness probes
+- Providing detailed health information to orchestrators
+
+**Implementation Considerations**:
+- Expose health endpoints (e.g., `/health`, `/ready`, `/live`)
+- Check critical dependencies (databases, queues, external services)
+- Return appropriate HTTP status codes (200, 503)
+- Implement authentication/authorization for sensitive health data
+- Provide different levels of detail based on security context
+
+## Leader Election Pattern
+
+**Problem**: Distributed tasks need coordination through a single instance.
+
+**Solution**: Coordinate actions in a distributed application by electing one instance as the leader that manages collaborating task instances.
+
+**When to Use**:
+- Coordinating distributed tasks
+- Managing shared resources in a cluster
+- Ensuring single-instance execution of critical tasks
+
+**Implementation Considerations**:
+- Use distributed locking mechanisms (Redis, etcd, ZooKeeper)
+- Handle leader failures with automatic re-election
+- Implement heartbeats to detect leader health
+- Ensure followers can become leaders quickly
+
+## Saga Pattern
+
+**Problem**: Maintaining data consistency across microservices without distributed transactions.
+
+**Solution**: Manage data consistency across microservices in distributed transaction scenarios using a sequence of local transactions.
+
+**When to Use**:
+- Long-running business processes spanning multiple services
+- Distributed transactions without 2PC support
+- Eventual consistency requirements across microservices
+
+**Implementation Considerations**:
+- Choose between orchestration (centralized) or choreography (event-based)
+- Define compensating transactions for rollback scenarios
+- Handle partial failures and rollback logic
+- Implement idempotency for all saga steps
+- Provide clear audit trails and monitoring
+
+## Sequential Convoy Pattern
+
+**Problem**: Process related messages in order without blocking independent message groups.
+
+**Solution**: Process a set of related messages in a defined order without blocking other message groups.
+
+**When to Use**:
+- Message processing requires strict ordering within groups
+- Independent message groups can be processed in parallel
+- Implementing session-based message processing
+
+**Implementation Considerations**:
+- Use session IDs or partition keys to group related messages
+- Process each group sequentially but process groups in parallel
+- Handle message failures within a session appropriately
diff --git a/skills/cloud-design-patterns/references/security.md b/skills/cloud-design-patterns/references/security.md
new file mode 100644
index 00000000..82c3a088
--- /dev/null
+++ b/skills/cloud-design-patterns/references/security.md
@@ -0,0 +1,55 @@
+# Security Patterns
+
+## Federated Identity Pattern
+
+**Problem**: Applications must manage user authentication and authorization.
+
+**Solution**: Delegate authentication to an external identity provider.
+
+**When to Use**:
+- Implementing single sign-on (SSO)
+- Reducing authentication complexity
+- Supporting social identity providers
+
+**Implementation Considerations**:
+- Use Azure AD, Auth0, or other identity providers
+- Implement OAuth 2.0, OpenID Connect, or SAML
+- Store minimal user data locally
+- Handle identity provider outages gracefully
+- Implement proper token validation
+
+## Quarantine Pattern
+
+**Problem**: External assets may contain malicious content or vulnerabilities.
+
+**Solution**: Ensure that external assets meet a team-agreed quality level before the workload consumes them.
+
+**When to Use**:
+- Processing user-uploaded files
+- Consuming external data or packages
+- Implementing zero-trust architectures
+
+**Implementation Considerations**:
+- Scan all external content before use (malware, vulnerabilities)
+- Isolate quarantine environment from production
+- Define clear quality gates for release
+- Implement automated scanning and validation
+- Log all quarantine activities for audit
+
+## Valet Key Pattern
+
+**Problem**: Applications shouldn't proxy all client data access.
+
+**Solution**: Use a token or key that provides clients with restricted direct access to a specific resource or service.
+
+**When to Use**:
+- Providing direct access to storage without proxying
+- Minimizing data transfer through application tier
+- Implementing time-limited or constrained access
+
+**Implementation Considerations**:
+- Generate SAS tokens or pre-signed URLs
+- Set appropriate expiration times
+- Limit permissions (read-only, write-only, specific operations)
+- Implement token revocation if needed
+- Monitor usage of valet keys
diff --git a/skills/copilot-spaces/SKILL.md b/skills/copilot-spaces/SKILL.md
new file mode 100644
index 00000000..616952af
--- /dev/null
+++ b/skills/copilot-spaces/SKILL.md
@@ -0,0 +1,205 @@
+---
+name: copilot-spaces
+description: 'Use Copilot Spaces to provide project-specific context to conversations. Use this skill when users mention a "Copilot space", want to load context from a shared knowledge base, discover available spaces, or ask questions grounded in curated project documentation, code, and instructions.'
+---
+
+# Copilot Spaces
+
+Use Copilot Spaces to bring curated, project-specific context into conversations. A Space is a shared collection of repositories, files, documentation, and instructions that grounds Copilot responses in your team's actual code and knowledge.
+
+## Available Tools
+
+### MCP Tools (Read-only)
+
+| Tool | Purpose |
+|------|---------|
+| `mcp__github__list_copilot_spaces` | List all spaces accessible to the current user |
+| `mcp__github__get_copilot_space` | Load a space's full context by owner and name |
+
+### REST API via `gh api` (Full CRUD)
+
+The Spaces REST API supports creating, updating, deleting spaces, and managing collaborators. The MCP server only exposes read operations, so use `gh api` for writes.
+
+**User Spaces:**
+
+| Method | Endpoint | Purpose |
+|--------|----------|---------|
+| `POST` | `/users/{username}/copilot-spaces` | Create a space |
+| `GET` | `/users/{username}/copilot-spaces` | List spaces |
+| `GET` | `/users/{username}/copilot-spaces/{number}` | Get a space |
+| `PUT` | `/users/{username}/copilot-spaces/{number}` | Update a space |
+| `DELETE` | `/users/{username}/copilot-spaces/{number}` | Delete a space |
+
+**Organization Spaces:** Same pattern under `/orgs/{org}/copilot-spaces/...`
+
+**Collaborators:** Add, list, update, and remove collaborators at `.../collaborators`
+
+**Scope requirements:** PAT needs `read:user` for reads, `user` for writes. Add with `gh auth refresh -h github.com -s user`.
+
+**Note:** This API is functional but not yet in the public REST API docs. It may require the `copilot_spaces_api` feature flag.
+
+## When to Use Spaces
+
+- User mentions "Copilot space" or asks to "load a space"
+- User wants answers grounded in specific project docs, code, or standards
+- User asks "what spaces are available?" or "find a space for X"
+- User needs onboarding context, architecture docs, or team-specific guidance
+- User wants to follow a structured workflow defined in a Space (templates, checklists, multi-step processes)
+
+## Workflow
+
+### 1. Discover Spaces
+
+When a user asks what spaces are available or you need to find the right space:
+
+```
+Call mcp__github__list_copilot_spaces
+```
+
+This returns all spaces the user can access, each with a `name` and `owner_login`. Present relevant matches to the user.
+
+To filter for a specific user's spaces, match `owner_login` against the username (e.g., "show me my spaces").
+
+### 2. Load a Space
+
+When a user names a specific space or you've identified the right one:
+
+```
+Call mcp__github__get_copilot_space with:
+ owner: "org-or-user" (the owner_login from the list)
+ name: "Space Name" (exact space name, case-sensitive)
+```
+
+This returns the space's full content: attached documentation, code context, custom instructions, and any other curated materials. Use this context to inform your responses.
+
+### 3. Follow the Breadcrumbs
+
+Space content often references external resources: GitHub issues, dashboards, repos, discussions, or other tools. Proactively fetch these using other MCP tools to gather complete context. For example:
+- A space references an initiative tracking issue. Use `issue_read` to get the latest comments.
+- A space links to a project board. Use project tools to check current status.
+- A space mentions a repo's masterplan. Use `get_file_contents` to read it.
+
+### 4. Answer or Execute
+
+Once loaded, use the space content based on what it contains:
+
+**If the space contains reference material** (docs, code, standards):
+- Answer questions about the project's architecture, patterns, or standards
+- Generate code that follows the team's conventions
+- Debug issues using project-specific knowledge
+
+**If the space contains workflow instructions** (templates, step-by-step processes):
+- Follow the workflow as defined, step by step
+- Gather data from the sources the workflow specifies
+- Produce output in the format the workflow defines
+- Show progress after each step so the user can steer
+
+### 5. Manage Spaces (via `gh api`)
+
+When a user wants to create, update, or delete a space, use `gh api`. First, find the space number from the list endpoint.
+
+**Update a space's instructions:**
+```bash
+gh api users/{username}/copilot-spaces/{number} \
+ -X PUT \
+ -f general_instructions="New instructions here"
+```
+
+**Update name, description, or instructions together:**
+```bash
+gh api users/{username}/copilot-spaces/{number} \
+ -X PUT \
+ -f name="Updated Name" \
+ -f description="Updated description" \
+ -f general_instructions="Updated instructions"
+```
+
+**Create a new space:**
+```bash
+gh api users/{username}/copilot-spaces \
+ -X POST \
+ -f name="My New Space" \
+ -f general_instructions="Help me with..." \
+ -f visibility="private"
+```
+
+**Attach resources (replaces entire resource list):**
+```json
+{
+ "resources_attributes": [
+ { "resource_type": "free_text", "metadata": { "name": "Notes", "text": "Content here" } },
+ { "resource_type": "github_issue", "metadata": { "repository_id": 12345, "number": 42 } },
+ { "resource_type": "github_file", "metadata": { "repository_id": 12345, "file_path": "docs/guide.md" } }
+ ]
+}
+```
+
+**Delete a space:**
+```bash
+gh api users/{username}/copilot-spaces/{number} -X DELETE
+```
+
+**Updatable fields:** `name`, `description`, `general_instructions`, `icon_type`, `icon_color`, `visibility` ("private"/"public"), `base_role` ("no_access"/"reader"), `resources_attributes`
+
+## Examples
+
+### Example 1: User Asks for a Space
+
+**User**: "Load the Accessibility copilot space"
+
+**Action**:
+1. Call `mcp__github__get_copilot_space` with owner `"github"`, name `"Accessibility"`
+2. Use the returned context to answer questions about accessibility standards, MAS grades, compliance processes, etc.
+
+### Example 2: User Wants to Find Spaces
+
+**User**: "What copilot spaces are available for our team?"
+
+**Action**:
+1. Call `mcp__github__list_copilot_spaces`
+2. Filter/present spaces relevant to the user's org or interests
+3. Offer to load any space they're interested in
+
+### Example 3: Context-Grounded Question
+
+**User**: "Using the security space, what's our policy on secret scanning?"
+
+**Action**:
+1. Call `mcp__github__get_copilot_space` with the appropriate owner and name
+2. Find the relevant policy in the space content
+3. Answer based on the actual internal documentation
+
+### Example 4: Space as a Workflow Engine
+
+**User**: "Write my weekly update using the PM Weekly Updates space"
+
+**Action**:
+1. Call `mcp__github__get_copilot_space` to load the space. It contains a template format and step-by-step instructions.
+2. Follow the space's workflow: pull data from attached initiative issues, gather metrics, draft each section.
+3. Fetch external resources referenced by the space (tracking issues, dashboards) using other MCP tools.
+4. Show the draft after each section so the user can review and fill in gaps.
+5. Produce the final output in the format the space defines.
+
+### Example 5: Update Space Instructions Programmatically
+
+**User**: "Update my PM Weekly Updates space to include a new writing guideline"
+
+**Action**:
+1. Call `mcp__github__list_copilot_spaces` and find the space number (e.g., 19).
+2. Call `mcp__github__get_copilot_space` to read current instructions.
+3. Modify the instructions text as requested.
+4. Push the update:
+```bash
+gh api users/labudis/copilot-spaces/19 -X PUT -f general_instructions="updated instructions..."
+```
+
+## Tips
+
+- Space names are **case-sensitive**. Use the exact name from `list_copilot_spaces`.
+- Spaces can be owned by users or organizations. Always provide both `owner` and `name`.
+- Space content can be large (20KB+). If returned as a temp file, use grep or view_range to find relevant sections rather than reading everything at once.
+- If a space isn't found, suggest listing available spaces to find the right name.
+- Spaces auto-update as underlying repos change, so the context is always current.
+- Some spaces contain custom instructions that should guide your behavior (coding standards, preferred patterns, workflows). Treat these as directives, not suggestions.
+- **Write operations** (`gh api` for create/update/delete) require the `user` PAT scope. If you get a 404 on write operations, run `gh auth refresh -h github.com -s user`.
+- Resource updates **replace the entire array**. To add a resource, include all existing resources plus the new one. To remove one, include `{ "id": 123, "_destroy": true }` in the array.
diff --git a/skills/creating-oracle-to-postgres-master-migration-plan/SKILL.md b/skills/creating-oracle-to-postgres-master-migration-plan/SKILL.md
new file mode 100644
index 00000000..5db371e2
--- /dev/null
+++ b/skills/creating-oracle-to-postgres-master-migration-plan/SKILL.md
@@ -0,0 +1,83 @@
+---
+name: creating-oracle-to-postgres-master-migration-plan
+description: 'Discovers all projects in a .NET solution, classifies each for Oracle-to-PostgreSQL migration eligibility, and produces a persistent master migration plan. Use when starting a multi-project Oracle-to-PostgreSQL migration, creating a migration inventory, or assessing which .NET projects contain Oracle dependencies.'
+---
+
+# Creating an Oracle-to-PostgreSQL Master Migration Plan
+
+Analyze a .NET solution, classify every project for Oracle-to-PostgreSQL migration eligibility, and write a structured plan that downstream agents and skills can parse.
+
+## Workflow
+
+```
+Progress:
+- [ ] Step 1: Discover projects in the solution
+- [ ] Step 2: Classify each project
+- [ ] Step 3: Confirm with user
+- [ ] Step 4: Write the plan file
+```
+
+**Step 1: Discover projects**
+
+Find the Solution File (it has a `.sln` or `.slnx` extension) in the workspace root (ask the user if multiple exist). Parse it to extract all `.csproj` project references. For each project, note the name, path, and type (class library, web API, console, test, etc.).
+
+**Step 2: Classify each project**
+
+Scan every non-test project for Oracle indicators:
+
+- NuGet references: `Oracle.ManagedDataAccess`, `Oracle.EntityFrameworkCore` (check `.csproj` and `packages.config`)
+- Config entries: Oracle connection strings in `appsettings.json`, `web.config`, `app.config`
+- Code usage: `OracleConnection`, `OracleCommand`, `OracleDataReader`
+- DDL cross-references under `.github/oracle-to-postgres-migration/DDL/Oracle/` (if present)
+
+Assign one classification per project:
+
+| Classification | Meaning |
+|---|---|
+| **MIGRATE** | Has Oracle interactions requiring conversion |
+| **SKIP** | No Oracle indicators (UI-only, shared utility, etc.) |
+| **ALREADY_MIGRATED** | A `-postgres` or `.Postgres` duplicate exists and appears processed |
+| **TEST_PROJECT** | Test project; handled by the testing workflow |
+
+**Step 3: Confirm with user**
+
+Present the classified list. Let the user adjust classifications or migration ordering before finalizing.
+
+**Step 4: Write the plan file**
+
+Save to: `.github/oracle-to-postgres-migration/Reports/Master Migration Plan.md`
+
+Use this exact template — downstream consumers depend on the structure:
+
+````markdown
+# Master Migration Plan
+
+**Solution:** {solution file name}
+**Solution Root:** {REPOSITORY_ROOT}
+**Created:** {timestamp}
+**Last Updated:** {timestamp}
+
+## Solution Summary
+
+| Metric | Count |
+|--------|-------|
+| Total projects in solution | {n} |
+| Projects requiring migration | {n} |
+| Projects already migrated | {n} |
+| Projects skipped (no Oracle usage) | {n} |
+| Test projects (handled separately) | {n} |
+
+## Project Inventory
+
+| # | Project Name | Path | Classification | Notes |
+|---|---|---|---|---|
+| 1 | {name} | {relative path} | MIGRATE | {notes} |
+| 2 | {name} | {relative path} | SKIP | No Oracle dependencies |
+
+## Migration Order
+
+1. **{ProjectName}** — {rationale, e.g., "Core data access library; other projects depend on it."}
+2. **{ProjectName}** — {rationale}
+````
+
+Order projects so that shared/foundational libraries are migrated before their dependents.
diff --git a/skills/creating-oracle-to-postgres-migration-bug-report/SKILL.md b/skills/creating-oracle-to-postgres-migration-bug-report/SKILL.md
new file mode 100644
index 00000000..dc3677d7
--- /dev/null
+++ b/skills/creating-oracle-to-postgres-migration-bug-report/SKILL.md
@@ -0,0 +1,43 @@
+---
+name: creating-oracle-to-postgres-migration-bug-report
+description: 'Creates structured bug reports for defects found during Oracle-to-PostgreSQL migration. Use when documenting behavioral differences between Oracle and PostgreSQL as actionable bug reports with severity, root cause, and remediation steps.'
+---
+
+# Creating Bug Reports for Oracle-to-PostgreSQL Migration
+
+## When to Use
+
+- Documenting a defect caused by behavioral differences between Oracle and PostgreSQL
+- Writing or reviewing a bug report for an Oracle-to-PostgreSQL migration project
+
+## Bug Report Format
+
+Use the template in [references/BUG-REPORT-TEMPLATE.md](references/BUG-REPORT-TEMPLATE.md). Each report must include:
+
+- **Status**: ✅ RESOLVED, ❌ UNRESOLVED, or ⏳ IN PROGRESS
+- **Component**: Affected endpoint, repository, or stored procedure
+- **Test**: Related automated test names
+- **Severity**: Low / Medium / High / Critical — based on impact scope
+- **Problem**: Expected Oracle behavior vs. observed PostgreSQL behavior
+- **Scenario**: Ordered reproduction steps with seed data, operation, expected result, and actual result
+- **Root Cause**: The specific Oracle/PostgreSQL behavioral difference causing the defect
+- **Solution**: Changes made or required, with explicit file paths
+- **Validation**: Steps to confirm the fix on both databases
+
+## Oracle-to-PostgreSQL Guidance
+
+- **Oracle is the source of truth** — frame expected behavior from the Oracle baseline
+- Call out data layer nuances explicitly: empty string vs. NULL, type coercion strictness, collation, sequence values, time zones, padding, constraints
+- Client code changes should be avoided unless required for correct behavior; when proposed, document and justify them clearly
+
+## Writing Style
+
+- Plain language, short sentences, clear next actions
+- Present or past tense consistently
+- Bullets and numbered lists for steps and validations
+- Minimal SQL excerpts and logs as evidence; omit sensitive data and keep snippets reproducible
+- Stick to existing runtime/language versions; avoid speculative fixes
+
+## Filename Convention
+
+Save bug reports as `BUG_REPORT_<Identifier>.md` where `<Identifier>` is a short PascalCase identifier (e.g., `EmptyStringNullHandling`, `RefCursorUnwrapFailure`).
diff --git a/skills/creating-oracle-to-postgres-migration-bug-report/references/BUG-REPORT-TEMPLATE.md b/skills/creating-oracle-to-postgres-migration-bug-report/references/BUG-REPORT-TEMPLATE.md
new file mode 100644
index 00000000..0e2b21d2
--- /dev/null
+++ b/skills/creating-oracle-to-postgres-migration-bug-report/references/BUG-REPORT-TEMPLATE.md
@@ -0,0 +1,79 @@
+# Bug Report Template
+
+Use this template when creating bug reports for Oracle-to-PostgreSQL migration defects.
+
+## Filename Format
+
+```
+BUG_REPORT_<Identifier>.md
+```
+
+## Template Structure
+
+```markdown
+# Bug Report: <Short Descriptive Title>
+
+**Status:** ✅ RESOLVED | ❌ UNRESOLVED | ⏳ IN PROGRESS
+**Component:**
+**Test:**
+**Severity:** Low | Medium | High | Critical
+
+---
+
+## Problem
+
+
+
+## Scenario
+
+
+
+## Root Cause
+
+
+
+## Solution
+
+
+
+## Validation
+
+
+
+## Files Modified
+
+
+
+## Notes / Next Steps
+
+
+```
+
+## Status Values
+
+| Status | Meaning |
+|--------|---------|
+| ✅ RESOLVED | Defect has been fixed and verified |
+| ❌ UNRESOLVED | Defect has not been addressed yet |
+| ⏳ IN PROGRESS | Defect is being investigated or fix is underway |
+
+## Style Rules
+
+- Keep wording concise and factual
+- Use present or past tense consistently
+- Prefer bullets and numbered lists for steps and validation
+- Call out data layer nuances (tracking, padding, constraints) explicitly
+- Keep to existing runtime/language versions; avoid speculative fixes
+- Include minimal SQL excerpts and logs as evidence; omit sensitive data
diff --git a/skills/creating-oracle-to-postgres-migration-integration-tests/SKILL.md b/skills/creating-oracle-to-postgres-migration-integration-tests/SKILL.md
new file mode 100644
index 00000000..b4018380
--- /dev/null
+++ b/skills/creating-oracle-to-postgres-migration-integration-tests/SKILL.md
@@ -0,0 +1,60 @@
+---
+name: creating-oracle-to-postgres-migration-integration-tests
+description: 'Creates integration test cases for .NET data access artifacts during Oracle-to-PostgreSQL database migrations. Generates DB-agnostic xUnit tests with deterministic seed data that validate behavior consistency across both database systems. Use when creating integration tests for a migrated project, generating test coverage for data access layers, or writing Oracle-to-PostgreSQL migration validation tests.'
+---
+
+# Creating Integration Tests for Oracle-to-PostgreSQL Migration
+
+Generates integration test cases for data access artifacts in a single target project. Tests validate behavior consistency when running against Oracle or PostgreSQL.
+
+## Prerequisites
+
+- The test project must already exist and compile (scaffolded separately).
+- Read the existing base test class and seed manager conventions before writing tests.
+
+## Workflow
+
+```
+Test Creation:
+- [ ] Step 1: Discover the test project conventions
+- [ ] Step 2: Identify testable data access artifacts
+- [ ] Step 3: Create seed data
+- [ ] Step 4: Write test cases
+- [ ] Step 5: Review determinism
+```
+
+**Step 1: Discover the test project conventions**
+
+Read the base test class, seed manager, and project file to understand inheritance patterns, transaction management, and seed file conventions.
+
+**Step 2: Identify testable data access artifacts**
+
+Scope to the target project only. List data access methods that interact with the database β repositories, DAOs, stored procedure callers, query builders.
+
+**Step 3: Create seed data**
+
+- Follow seed file location and naming conventions from the existing project.
+- Reuse existing seed files when possible.
+- Avoid `TRUNCATE TABLE` β keep existing database data intact.
+- Do not commit seed data; tests run in transactions that roll back.
+- Ensure seed data does not conflict with other tests.
+- Load and verify seed data before assertions depend on it.
+
+**Step 4: Write test cases**
+
+- Inherit from the base test class to get automatic transaction create/rollback.
+- Assert logical outputs (rows, columns, counts, error types), not platform-specific messages.
+- Assert specific expected values β never assert that a value is merely non-null or non-empty when a concrete value is available from seed data.
+- Avoid testing code paths that do not exist or asserting behavior that cannot occur.
+- Avoid redundant assertions across tests targeting the same method.
+
+**Step 5: Review determinism**
+
+Re-examine every assertion against non-null values. Confirm each is deterministic against the seeded data. Fix any assertion that depends on database state outside the test's control.
+
+## Key Constraints
+
+- **Oracle is the golden source** — tests capture Oracle's expected behavior.
+- **DB-agnostic assertions** — no platform-specific error messages or syntax in assertions.
+- **Seed only against Oracle** — test project will be migrated to PostgreSQL later.
+- **Scoped to one project** — do not create tests for artifacts outside the target project.
diff --git a/skills/flowstudio-power-automate-build/SKILL.md b/skills/flowstudio-power-automate-build/SKILL.md
new file mode 100644
index 00000000..25112118
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/SKILL.md
@@ -0,0 +1,460 @@
+---
+name: flowstudio-power-automate-build
+description: >-
+ Build, scaffold, and deploy Power Automate cloud flows using the FlowStudio
+ MCP server. Load this skill when asked to: create a flow, build a new flow,
+ deploy a flow definition, scaffold a Power Automate workflow, construct a flow
+ JSON, update an existing flow's actions, patch a flow definition, add actions
+ to a flow, wire up connections, or generate a workflow definition from scratch.
+ Requires a FlowStudio MCP subscription β see https://mcp.flowstudio.app
+---
+
+# Build & Deploy Power Automate Flows with FlowStudio MCP
+
+Step-by-step guide for constructing and deploying Power Automate cloud flows
+programmatically through the FlowStudio MCP server.
+
+**Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT.
+See the `flowstudio-power-automate-mcp` skill for connection setup.
+Subscribe at https://mcp.flowstudio.app
+
+---
+
+## Source of Truth
+
+> **Always call `tools/list` first** to confirm available tool names and their
+> parameter schemas. Tool names and parameters may change between server versions.
+> This skill covers response shapes, behavioral notes, and build patterns β
+> things `tools/list` cannot tell you. If this document disagrees with `tools/list`
+> or a real API response, the API wins.
+
+---
+
+## Python Helper
+
+```python
+import json, urllib.request
+
+MCP_URL = "https://mcp.flowstudio.app/mcp"
+MCP_TOKEN = ""
+
+def mcp(tool, **kwargs):
+ payload = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "tools/call",
+ "params": {"name": tool, "arguments": kwargs}}).encode()
+ req = urllib.request.Request(MCP_URL, data=payload,
+ headers={"x-api-key": MCP_TOKEN, "Content-Type": "application/json",
+ "User-Agent": "FlowStudio-MCP/1.0"})
+ try:
+ resp = urllib.request.urlopen(req, timeout=120)
+ except urllib.error.HTTPError as e:
+ body = e.read().decode("utf-8", errors="replace")
+ raise RuntimeError(f"MCP HTTP {e.code}: {body[:200]}") from e
+ raw = json.loads(resp.read())
+ if "error" in raw:
+ raise RuntimeError(f"MCP error: {json.dumps(raw['error'])}")
+ return json.loads(raw["result"]["content"][0]["text"])
+
+ENV = "" # e.g. Default-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+```
+
+---
+
+## Step 1 β Safety Check: Does the Flow Already Exist?
+
+Always look before you build to avoid duplicates:
+
+```python
+results = mcp("list_store_flows",
+ environmentName=ENV, searchTerm="My New Flow")
+
+# list_store_flows returns a direct array (no wrapper object)
+if len(results) > 0:
+ # Flow exists β modify rather than create
+ # id format is "envId.flowId" β split to get the flow UUID
+ FLOW_ID = results[0]["id"].split(".", 1)[1]
+ print(f"Existing flow: {FLOW_ID}")
+ defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+else:
+ print("Flow not found β building from scratch")
+ FLOW_ID = None
+```
+
+---
+
+## Step 2 β Obtain Connection References
+
+Every connector action needs a `connectionName` that points to a key in the
+flow's `connectionReferences` map. That key links to an authenticated connection
+in the environment.
+
+> **MANDATORY**: You MUST call `list_live_connections` first β do NOT ask the
+> user for connection names or GUIDs. The API returns the exact values you need.
+> Only prompt the user if the API confirms that required connections are missing.
+
+### 2a β Always call `list_live_connections` first
+
+```python
+conns = mcp("list_live_connections", environmentName=ENV)
+
+# Filter to connected (authenticated) connections only
+active = [c for c in conns["connections"]
+ if c["statuses"][0]["status"] == "Connected"]
+
+# Build a lookup: connectorName β connectionName (id)
+conn_map = {}
+for c in active:
+ conn_map[c["connectorName"]] = c["id"]
+
+print(f"Found {len(active)} active connections")
+print("Available connectors:", list(conn_map.keys()))
+```
+
+### 2b β Determine which connectors the flow needs
+
+Based on the flow you are building, identify which connectors are required.
+Common connector API names:
+
+| Connector | API name |
+|---|---|
+| SharePoint | `shared_sharepointonline` |
+| Outlook / Office 365 | `shared_office365` |
+| Teams | `shared_teams` |
+| Approvals | `shared_approvals` |
+| OneDrive for Business | `shared_onedriveforbusiness` |
+| Excel Online (Business) | `shared_excelonlinebusiness` |
+| Dataverse | `shared_commondataserviceforapps` |
+| Microsoft Forms | `shared_microsoftforms` |
+
+> **Flows that need NO connections** (e.g. Recurrence + Compose + HTTP only)
+> can skip the rest of Step 2 β omit `connectionReferences` from the deploy call.
+
+### 2c β If connections are missing, guide the user
+
+```python
+connectors_needed = ["shared_sharepointonline", "shared_office365"] # adjust per flow
+
+missing = [c for c in connectors_needed if c not in conn_map]
+
+if not missing:
+ print("✅ All required connections are available — proceeding to build")
+else:
+ # ── STOP: connections must be created interactively ──
+ # Connections require OAuth consent in a browser — no API can create them.
+ print("⚠️ The following connectors have no active connection in this environment:")
+ for c in missing:
+ friendly = c.replace("shared_", "").replace("onlinebusiness", " Online (Business)")
+ print(f" β’ {friendly} (API name: {c})")
+ print()
+ print("Please create the missing connections:")
+ print(" 1. Open https://make.powerautomate.com/connections")
+ print(" 2. Select the correct environment from the top-right picker")
+ print(" 3. Click '+ New connection' for each missing connector listed above")
+ print(" 4. Sign in and authorize when prompted")
+ print(" 5. Tell me when done β I will re-check and continue building")
+ # DO NOT proceed to Step 3 until the user confirms.
+ # After user confirms, re-run Step 2a to refresh conn_map.
+```
+
+### 2d β Build the connectionReferences block
+
+Only execute this after 2c confirms no missing connectors:
+
+```python
+connection_references = {}
+for connector in connectors_needed:
+ connection_references[connector] = {
+ "connectionName": conn_map[connector], # the GUID from list_live_connections
+ "source": "Invoker",
+ "id": f"/providers/Microsoft.PowerApps/apis/{connector}"
+ }
+```
+
+> **IMPORTANT β `host.connectionName` in actions**: When building actions in
+> Step 3, set `host.connectionName` to the **key** from this map (e.g.
+> `shared_teams`), NOT the connection GUID. The GUID only goes inside the
+> `connectionReferences` entry. The engine matches the action's
+> `host.connectionName` to the key to find the right connection.
+
+> **Alternative** β if you already have a flow using the same connectors,
+> you can extract `connectionReferences` from its definition:
+> ```python
+> ref_flow = mcp("get_live_flow", environmentName=ENV, flowName="<reference-flow-id>")
+> connection_references = ref_flow["properties"]["connectionReferences"]
+> ```
+
+See the `power-automate-mcp` skill's **connection-references.md** reference
+for the full connection reference structure.
+
+---
+
+## Step 3 β Build the Flow Definition
+
+Construct the definition object. See [flow-schema.md](references/flow-schema.md)
+for the full schema and these action pattern references for copy-paste templates:
+- [action-patterns-core.md](references/action-patterns-core.md) β Variables, control flow, expressions
+- [action-patterns-data.md](references/action-patterns-data.md) β Array transforms, HTTP, parsing
+- [action-patterns-connectors.md](references/action-patterns-connectors.md) β SharePoint, Outlook, Teams, Approvals
+
+```python
+definition = {
+ "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
+ "contentVersion": "1.0.0.0",
+ "triggers": { ... }, # see trigger-types.md / build-patterns.md
+ "actions": { ... } # see ACTION-PATTERNS-*.md / build-patterns.md
+}
+```
+
+> See [build-patterns.md](references/build-patterns.md) for complete, ready-to-use
+> flow definitions covering Recurrence+SharePoint+Teams, HTTP triggers, and more.
+
+---
+
+## Step 4 β Deploy (Create or Update)
+
+`update_live_flow` handles both creation and updates in a single tool.
+
+### Create a new flow (no existing flow)
+
+Omit `flowName` β the server generates a new GUID and creates via PUT:
+
+```python
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ # flowName omitted β creates a new flow
+ definition=definition,
+ connectionReferences=connection_references,
+ displayName="Overdue Invoice Notifications",
+ description="Weekly SharePoint β Teams notification flow, built by agent"
+)
+
+if result.get("error") is not None:
+ print("Create failed:", result["error"])
+else:
+ # Capture the new flow ID for subsequent steps
+ FLOW_ID = result["created"]
+ print(f"β Flow created: {FLOW_ID}")
+```
+
+### Update an existing flow
+
+Provide `flowName` to PATCH:
+
+```python
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ definition=definition,
+ connectionReferences=connection_references,
+ displayName="My Updated Flow",
+ description="Updated by agent on " + __import__('datetime').datetime.utcnow().isoformat()
+)
+
+if result.get("error") is not None:
+ print("Update failed:", result["error"])
+else:
+ print("Update succeeded:", result)
+```
+
+> ⚠️ `update_live_flow` always returns an `error` key.
+> `null` (Python `None`) means success — do not treat the presence of the key as failure.
+>
+> ⚠️ `description` is required for both create and update.
+
+### Common deployment errors
+
+| Error message (contains) | Cause | Fix |
+|---|---|---|
+| `missing from connectionReferences` | An action's `host.connectionName` references a key that doesn't exist in the `connectionReferences` map | Ensure `host.connectionName` uses the **key** from `connectionReferences` (e.g. `shared_teams`), not the raw GUID |
+| `ConnectionAuthorizationFailed` / 403 | The connection GUID belongs to another user or is not authorized | Re-run Step 2a and use a connection owned by the current `x-api-key` user |
+| `InvalidTemplate` / `InvalidDefinition` | Syntax error in the definition JSON | Check `runAfter` chains, expression syntax, and action type spelling |
+| `ConnectionNotConfigured` | A connector action exists but the connection GUID is invalid or expired | Re-check `list_live_connections` for a fresh GUID |
+
+---
+
+## Step 5 β Verify the Deployment
+
+```python
+check = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+
+# Confirm state
+print("State:", check["properties"]["state"]) # Should be "Started"
+
+# Confirm the action we added is there
+acts = check["properties"]["definition"]["actions"]
+print("Actions:", list(acts.keys()))
+```
+
+---
+
+## Step 6 β Test the Flow
+
+> **MANDATORY**: Before triggering any test run, **ask the user for confirmation**.
+> Running a flow has real side effects β it may send emails, post Teams messages,
+> write to SharePoint, start approvals, or call external APIs. Explain what the
+> flow will do and wait for explicit approval before calling `trigger_live_flow`
+> or `resubmit_live_flow_run`.
+
+### Updated flows (have prior runs)
+
+The fastest path β resubmit the most recent run:
+
+```python
+runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=1)
+if runs:
+ result = mcp("resubmit_live_flow_run",
+ environmentName=ENV, flowName=FLOW_ID, runName=runs[0]["name"])
+ print(result)
+```
+
+### Flows already using an HTTP trigger
+
+Fire directly with a test payload:
+
+```python
+schema = mcp("get_live_flow_http_schema",
+ environmentName=ENV, flowName=FLOW_ID)
+print("Expected body:", schema.get("triggerSchema"))
+
+result = mcp("trigger_live_flow",
+ environmentName=ENV, flowName=FLOW_ID,
+ body={"name": "Test", "value": 1})
+print(f"Status: {result['status']}")
+```
+
+### Brand-new non-HTTP flows (Recurrence, connector triggers, etc.)
+
+A brand-new Recurrence or connector-triggered flow has no runs to resubmit
+and no HTTP endpoint to call. **Deploy with a temporary HTTP trigger first,
+test the actions, then swap to the production trigger.**
+
+#### 6a — Save the real trigger, deploy with a temporary HTTP trigger
+
+```python
+# Save the production trigger you built in Step 3
+production_trigger = definition["triggers"]
+
+# Replace with a temporary HTTP trigger
+definition["triggers"] = {
+ "manual": {
+ "type": "Request",
+ "kind": "Http",
+ "inputs": {
+ "schema": {}
+ }
+ }
+}
+
+# Deploy (create or update) with the temp trigger
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID, # omit if creating new
+ definition=definition,
+ connectionReferences=connection_references,
+ displayName="Overdue Invoice Notifications",
+ description="Deployed with temp HTTP trigger for testing")
+
+if result.get("error") is not None:
+ print("Deploy failed:", result["error"])
+else:
+ if not FLOW_ID:
+ FLOW_ID = result["created"]
+ print(f"β Deployed with temp HTTP trigger: {FLOW_ID}")
+```
+
+#### 6b — Fire the flow and check the result
+
+```python
+# Trigger the flow
+test = mcp("trigger_live_flow",
+ environmentName=ENV, flowName=FLOW_ID)
+print(f"Trigger response status: {test['status']}")
+
+# Wait for the run to complete
+import time; time.sleep(15)
+
+# Check the run result
+runs = mcp("get_live_flow_runs",
+ environmentName=ENV, flowName=FLOW_ID, top=1)
+run = runs[0]
+print(f"Run {run['name']}: {run['status']}")
+
+if run["status"] == "Failed":
+ err = mcp("get_live_flow_run_error",
+ environmentName=ENV, flowName=FLOW_ID, runName=run["name"])
+ root = err["failedActions"][-1]
+ print(f"Root cause: {root['actionName']} β {root.get('code')}")
+ # Debug and fix the definition before proceeding
+ # See power-automate-debug skill for full diagnosis workflow
+```
+
+#### 6c — Swap to the production trigger
+
+Once the test run succeeds, replace the temporary HTTP trigger with the real one:
+
+```python
+# Restore the production trigger
+definition["triggers"] = production_trigger
+
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ definition=definition,
+ connectionReferences=connection_references,
+ description="Swapped to production trigger after successful test")
+
+if result.get("error") is not None:
+ print("Trigger swap failed:", result["error"])
+else:
+ print("β Production trigger deployed β flow is live")
+```
+
+> **Why this works**: The trigger is just the entry point β the actions are
+> identical regardless of how the flow starts. Testing via HTTP trigger
+> exercises all the same Compose, SharePoint, Teams, etc. actions.
+>
+> **Connector triggers** (e.g. "When an item is created in SharePoint"):
+> If actions reference `triggerBody()` or `triggerOutputs()`, pass a
+> representative test payload in `trigger_live_flow`'s `body` parameter
+> that matches the shape the connector trigger would produce.
+
+---
+
+## Gotchas
+
+| Mistake | Consequence | Prevention |
+|---|---|---|
+| Missing `connectionReferences` in deploy | 400 "Supply connectionReferences" | Always call `list_live_connections` first |
+| `"operationOptions"` missing on Foreach | Parallel execution, race conditions on writes | Always add `"Sequential"` |
+| `union(old_data, new_data)` | Old values override new (first-wins) | Use `union(new_data, old_data)` |
+| `split()` on potentially-null string | `InvalidTemplate` crash | Wrap with `coalesce(field, '')` |
+| Checking `result["error"]` exists | Always present; true error is `!= null` | Use `result.get("error") is not None` |
+| Flow deployed but state is "Stopped" | Flow won't run on schedule | Check connection auth; re-enable |
+| Teams "Chat with Flow bot" recipient as object | 400 `GraphUserDetailNotFound` | Use plain string with trailing semicolon (see below) |
+
+### Teams `PostMessageToConversation` β Recipient Formats
+
+The `body/recipient` parameter format depends on the `location` value:
+
+| Location | `body/recipient` format | Example |
+|---|---|---|
+| **Chat with Flow bot** | Plain email string with **trailing semicolon** | `"user@contoso.com;"` |
+| **Channel** | Object with `groupId` and `channelId` | `{"groupId": "...", "channelId": "..."}` |
+
+> **Common mistake**: passing `{"to": "user@contoso.com"}` for "Chat with Flow bot"
+> returns a 400 `GraphUserDetailNotFound` error. The API expects a plain string.
+
+---
+
+## Reference Files
+
+- [flow-schema.md](references/flow-schema.md) β Full flow definition JSON schema
+- [trigger-types.md](references/trigger-types.md) β Trigger type templates
+- [action-patterns-core.md](references/action-patterns-core.md) β Variables, control flow, expressions
+- [action-patterns-data.md](references/action-patterns-data.md) β Array transforms, HTTP, parsing
+- [action-patterns-connectors.md](references/action-patterns-connectors.md) β SharePoint, Outlook, Teams, Approvals
+- [build-patterns.md](references/build-patterns.md) β Complete flow definition templates (Recurrence+SP+Teams, HTTP trigger)
+
+## Related Skills
+
+- `flowstudio-power-automate-mcp` β Core connection setup and tool reference
+- `flowstudio-power-automate-debug` β Debug failing flows after deployment
diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md b/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md
new file mode 100644
index 00000000..d9102d6d
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/action-patterns-connectors.md
@@ -0,0 +1,542 @@
+# FlowStudio MCP β Action Patterns: Connectors
+
+SharePoint, Outlook, Teams, and Approvals connector action patterns.
+
+> All examples assume `"runAfter"` is set appropriately.
+> Replace each empty `"connectionName"` value with the **key** you used in
+> `connectionReferences` (e.g. `shared_sharepointonline`, `shared_teams`).
+> This is NOT the connection GUID — it is the logical reference name that links
+> the action to its entry in the `connectionReferences` map.
+
+---
+
+## SharePoint
+
+### SharePoint β Get Items
+
+```json
+"Get_SP_Items": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "GetItems"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "$filter": "Status eq 'Active'",
+ "$top": 500
+ }
+ }
+}
+```
+
+Result reference: `@outputs('Get_SP_Items')?['body/value']`
+
+> **Dynamic OData filter with string interpolation**: inject a runtime value
+> directly into the `$filter` string using `@{...}` syntax:
+> ```
+> "$filter": "Title eq '@{outputs('ConfirmationCode')}'"
+> ```
+> Note the single-quotes inside double-quotes β correct OData string literal
+> syntax. Avoids a separate variable action.
+
+> **Pagination for large lists**: by default, GetItems stops at `$top`. To auto-paginate
+> beyond that, enable the pagination policy on the action. In the flow definition this
+> appears as:
+> ```json
+> "paginationPolicy": { "minimumItemCount": 10000 }
+> ```
+> Set `minimumItemCount` to the maximum number of items you expect. The connector will
+> keep fetching pages until that count is reached or the list is exhausted. Without this,
+> flows silently return a capped result on lists with >5,000 items.
+
+---
+
+### SharePoint β Get Item (Single Row by ID)
+
+```json
+"Get_SP_Item": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "GetItem"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "id": "@triggerBody()?['ID']"
+ }
+ }
+}
+```
+
+Result reference: `@body('Get_SP_Item')?['FieldName']`
+
+> Use `GetItem` (not `GetItems` with a filter) when you already have the ID.
+> Re-fetching after a trigger gives you the **current** row state, not the
+> snapshot captured at trigger time β important if another process may have
+> modified the item since the flow started.
+
+---
+
+### SharePoint β Create Item
+
+```json
+"Create_SP_Item": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "PostItem"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "item/Title": "@variables('myTitle')",
+ "item/Status": "Active"
+ }
+ }
+}
+```
+
+---
+
+### SharePoint β Update Item
+
+```json
+"Update_SP_Item": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "PatchItem"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "id": "@item()?['ID']",
+ "item/Status": "Processed"
+ }
+ }
+}
+```
+
+---
+
+### SharePoint β File Upsert (Create or Overwrite in Document Library)
+
+SharePoint's `CreateFile` fails if the file already exists. To upsert (create or overwrite)
+without a prior existence check, use `GetFileMetadataByPath` on **both Succeeded and Failed**
+from `CreateFile` β if create failed because the file exists, the metadata call still
+returns its ID, which `UpdateFile` can then overwrite:
+
+```json
+"Create_File": {
+ "type": "OpenApiConnection",
+ "inputs": {
+ "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "", "operationId": "CreateFile" },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "folderPath": "/My Library/Subfolder",
+ "name": "@{variables('filename')}",
+ "body": "@outputs('Compose_File_Content')"
+ }
+ }
+},
+"Get_File_Metadata_By_Path": {
+ "type": "OpenApiConnection",
+ "runAfter": { "Create_File": ["Succeeded", "Failed"] },
+ "inputs": {
+ "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "", "operationId": "GetFileMetadataByPath" },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "path": "/My Library/Subfolder/@{variables('filename')}"
+ }
+ }
+},
+"Update_File": {
+ "type": "OpenApiConnection",
+ "runAfter": { "Get_File_Metadata_By_Path": ["Succeeded", "Skipped"] },
+ "inputs": {
+ "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "", "operationId": "UpdateFile" },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "id": "@outputs('Get_File_Metadata_By_Path')?['body/{Identifier}']",
+ "body": "@outputs('Compose_File_Content')"
+ }
+ }
+}
+```
+
+> If `Create_File` succeeds, `Get_File_Metadata_By_Path` is `Skipped` and `Update_File`
+> still fires (accepting `Skipped`), harmlessly overwriting the file just created.
+> If `Create_File` fails (file exists), the metadata call retrieves the existing file's ID
+> and `Update_File` overwrites it. Either way you end with the latest content.
+>
+> **Document library system properties** β when iterating a file library result (e.g.
+> from `ListFolder` or `GetFilesV2`), use curly-brace property names to access
+> SharePoint's built-in file metadata. These are different from list field names:
+> ```
+> @item()?['{Name}'] β filename without path (e.g. "report.csv")
+> @item()?['{FilenameWithExtension}'] β same as {Name} in most connectors
+> @item()?['{Identifier}'] β internal file ID for use in UpdateFile/DeleteFile
+> @item()?['{FullPath}'] β full server-relative path
+> @item()?['{IsFolder}'] β boolean, true for folder entries
+> ```
+
+---
+
+### SharePoint β GetItemChanges Column Gate
+
+When a SharePoint "item modified" trigger fires, it doesn't tell you WHICH
+column changed. Use `GetItemChanges` to get per-column change flags, then gate
+downstream logic on specific columns:
+
+```json
+"Get_Changes": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "GetItemChanges"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "id": "@triggerBody()?['ID']",
+ "since": "@triggerBody()?['Modified']",
+ "includeDrafts": false
+ }
+ }
+}
+```
+
+Gate on a specific column:
+
+```json
+"expression": {
+ "and": [{
+ "equals": [
+ "@body('Get_Changes')?['Column']?['hasChanged']",
+ true
+ ]
+ }]
+}
+```
+
+> **New-item detection:** On the very first modification (version 1.0),
+> `GetItemChanges` may report no prior version. Check
+> `@equals(triggerBody()?['OData__UIVersionString'], '1.0')` to detect
+> newly created items and skip change-gate logic for those.
+
+---
+
+### SharePoint β REST MERGE via HttpRequest
+
+For cross-list updates or advanced operations not supported by the standard
+Update Item connector (e.g., updating a list in a different site), use the
+SharePoint REST API via the `HttpRequest` operation:
+
+```json
+"Update_Cross_List_Item": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "HttpRequest"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/target-site",
+ "parameters/method": "POST",
+ "parameters/uri": "/_api/web/lists(guid'<target-list-guid>')/items(@{variables('ItemId')})",
+ "parameters/headers": {
+ "Accept": "application/json;odata=nometadata",
+ "Content-Type": "application/json;odata=nometadata",
+ "X-HTTP-Method": "MERGE",
+ "IF-MATCH": "*"
+ },
+ "parameters/body": "{ \"Title\": \"@{variables('NewTitle')}\", \"Status\": \"@{variables('NewStatus')}\" }"
+ }
+ }
+}
+```
+
+> **Key headers:**
+> - `X-HTTP-Method: MERGE` β tells SharePoint to do a partial update (PATCH semantics)
+> - `IF-MATCH: *` β overwrites regardless of current ETag (no conflict check)
+>
+> The `HttpRequest` operation reuses the existing SharePoint connection β no extra
+> authentication needed. Use this when the standard Update Item connector can't
+> reach the target list (different site collection, or you need raw REST control).
+
+---
+
+### SharePoint β File as JSON Database (Read + Parse)
+
+Use a SharePoint document library JSON file as a queryable "database" of
+last-known-state records. A separate process (e.g., Power BI dataflow) maintains
+the file; the flow downloads and filters it for before/after comparisons.
+
+```json
+"Get_File": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "GetFileContent"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "id": "%252fShared%2bDocuments%252fdata.json",
+ "inferContentType": false
+ }
+ }
+},
+"Parse_JSON_File": {
+ "type": "Compose",
+ "runAfter": { "Get_File": ["Succeeded"] },
+ "inputs": "@json(decodeBase64(body('Get_File')?['$content']))"
+},
+"Find_Record": {
+ "type": "Query",
+ "runAfter": { "Parse_JSON_File": ["Succeeded"] },
+ "inputs": {
+ "from": "@outputs('Parse_JSON_File')",
+ "where": "@equals(item()?['id'], variables('RecordId'))"
+ }
+}
+```
+
+> **Decode chain:** `GetFileContent` returns base64-encoded content in
+> `body(...)?['$content']`. Apply `decodeBase64()` then `json()` to get a
+> usable array. `Filter Array` then acts as a WHERE clause.
+>
+> **When to use:** When you need a lightweight "before" snapshot to detect field
+> changes from a webhook payload (the "after" state). Simpler than maintaining
+> a full SharePoint list mirror β works well for up to ~10K records.
+>
+> **File path encoding:** In the `id` parameter, SharePoint URL-encodes paths
+> twice. Spaces become `%2b` (plus sign), slashes become `%252f`.
+
+---
+
+## Outlook
+
+### Outlook β Send Email
+
+```json
+"Send_Email": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365",
+ "connectionName": "",
+ "operationId": "SendEmailV2"
+ },
+ "parameters": {
+ "emailMessage/To": "recipient@contoso.com",
+ "emailMessage/Subject": "Automated notification",
+ "emailMessage/Body": "<p>@{outputs('Compose_Message')}</p>",
+ "emailMessage/IsHtml": true
+ }
+ }
+}
+```
+
+---
+
+### Outlook — Get Emails (Read Template from Folder)
+
+```json
+"Get_Email_Template": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365",
+ "connectionName": "",
+ "operationId": "GetEmailsV3"
+ },
+ "parameters": {
+ "folderPath": "Id::",
+ "fetchOnlyUnread": false,
+ "includeAttachments": false,
+ "top": 1,
+ "importance": "Any",
+ "fetchOnlyWithAttachment": false,
+ "subjectFilter": "My Email Template Subject"
+ }
+ }
+}
+```
+
+Access subject and body:
+```
+@first(outputs('Get_Email_Template')?['body/value'])?['subject']
+@first(outputs('Get_Email_Template')?['body/value'])?['body']
+```
+
+> **Outlook-as-CMS pattern**: store a template email in a dedicated Outlook folder.
+> Set `fetchOnlyUnread: false` so the template persists after first use.
+> Non-technical users can update subject and body by editing that email —
+> no flow changes required. Pass subject and body directly into `SendEmailV2`.
+>
+> To get a folder ID: in Outlook on the web, right-click the folder → open in
+> new tab → the folder GUID is in the URL. Prefix it with `Id::` in `folderPath`.
+
+---
+
+## Teams
+
+### Teams — Post Message
+
+```json
+"Post_Teams_Message": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams",
+ "connectionName": "",
+ "operationId": "PostMessageToConversation"
+ },
+ "parameters": {
+ "poster": "Flow bot",
+ "location": "Channel",
+ "body/recipient": {
+ "groupId": "",
+ "channelId": ""
+ },
+ "body/messageBody": "@outputs('Compose_Message')"
+ }
+ }
+}
+```
+
+#### Variant: Group Chat (1:1 or Multi-Person)
+
+To post to a group chat instead of a channel, use `"location": "Group chat"` with
+a thread ID as the recipient:
+
+```json
+"Post_To_Group_Chat": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams",
+ "connectionName": "",
+ "operationId": "PostMessageToConversation"
+ },
+ "parameters": {
+ "poster": "Flow bot",
+ "location": "Group chat",
+ "body/recipient": "19:@thread.v2",
+ "body/messageBody": "@outputs('Compose_Message')"
+ }
+ }
+}
+```
+
+For 1:1 ("Chat with Flow bot"), use `"location": "Chat with Flow bot"` and set
+`body/recipient` to the user's email address.
+
+> **Active-user gate:** When sending notifications in a loop, check the recipient's
+> Azure AD account is enabled before posting — avoids failed deliveries to departed
+> staff:
+> ```json
+> "Check_User_Active": {
+> "type": "OpenApiConnection",
+> "inputs": {
+> "host": { "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365users",
+> "operationId": "UserProfile_V2" },
+> "parameters": { "id": "@{item()?['Email']}" }
+> }
+> }
+> ```
+> Then gate: `@equals(body('Check_User_Active')?['accountEnabled'], true)`
+
+---
+
+## Approvals
+
+### Split Approval (Create → Wait)
+
+The standard "Start and wait for an approval" is a single blocking action.
+For more control (e.g., posting the approval link in Teams, or adding a timeout
+scope), split it into two actions: `CreateAnApproval` (fire-and-forget) then
+`WaitForAnApproval` (webhook pause).
+
+```json
+"Create_Approval": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_approvals",
+ "connectionName": "",
+ "operationId": "CreateAnApproval"
+ },
+ "parameters": {
+ "approvalType": "CustomResponse/Result",
+ "ApprovalCreationInput/title": "Review: @{variables('ItemTitle')}",
+ "ApprovalCreationInput/assignedTo": "approver@contoso.com",
+ "ApprovalCreationInput/details": "Please review and select an option.",
+ "ApprovalCreationInput/responseOptions": ["Approve", "Reject", "Defer"],
+ "ApprovalCreationInput/enableNotifications": true,
+ "ApprovalCreationInput/enableReassignment": true
+ }
+ }
+},
+"Wait_For_Approval": {
+ "type": "OpenApiConnectionWebhook",
+ "runAfter": { "Create_Approval": ["Succeeded"] },
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_approvals",
+ "connectionName": "",
+ "operationId": "WaitForAnApproval"
+ },
+ "parameters": {
+ "approvalName": "@body('Create_Approval')?['name']"
+ }
+ }
+}
+```
+
+> **`approvalType` options:**
+> - `"Approve/Reject - First to respond"` — binary, first responder wins
+> - `"Approve/Reject - Everyone must approve"` — requires all assignees
+> - `"CustomResponse/Result"` — define your own response buttons
+>
+> After `Wait_For_Approval`, read the outcome:
+> ```
+> @body('Wait_For_Approval')?['outcome'] → "Approve", "Reject", or custom
+> @body('Wait_For_Approval')?['responses'][0]?['responder']?['displayName']
+> @body('Wait_For_Approval')?['responses'][0]?['comments']
+> ```
+>
+> The split pattern lets you insert actions between create and wait — e.g.,
+> posting the approval link to Teams, starting a timeout scope, or logging
+> the pending approval to a tracking list.
diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-core.md b/skills/flowstudio-power-automate-build/references/action-patterns-core.md
new file mode 100644
index 00000000..74221ba8
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/action-patterns-core.md
@@ -0,0 +1,542 @@
+# FlowStudio MCP — Action Patterns: Core
+
+Variables, control flow, and expression patterns for Power Automate flow definitions.
+
+> All examples assume `"runAfter"` is set appropriately.
+> Replace `<CONNECTION_NAME>` with the **key** you used in your `connectionReferences` map
+> (e.g. `shared_teams`, `shared_office365`) — NOT the connection GUID.
+
+---
+
+## Data & Variables
+
+### Compose (Store a Value)
+
+```json
+"Compose_My_Value": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "@variables('myVar')"
+}
+```
+
+Reference: `@outputs('Compose_My_Value')`
+
+---
+
+### Initialize Variable
+
+```json
+"Init_Counter": {
+ "type": "InitializeVariable",
+ "runAfter": {},
+ "inputs": {
+ "variables": [{
+ "name": "counter",
+ "type": "Integer",
+ "value": 0
+ }]
+ }
+}
+```
+
+Types: `"Integer"`, `"Float"`, `"Boolean"`, `"String"`, `"Array"`, `"Object"`
+
+---
+
+### Set Variable
+
+```json
+"Set_Counter": {
+ "type": "SetVariable",
+ "runAfter": {},
+ "inputs": {
+ "name": "counter",
+ "value": "@add(variables('counter'), 1)"
+ }
+}
+```
+
+---
+
+### Append to Array Variable
+
+```json
+"Collect_Item": {
+ "type": "AppendToArrayVariable",
+ "runAfter": {},
+ "inputs": {
+ "name": "resultArray",
+ "value": "@item()"
+ }
+}
+```
+
+---
+
+### Increment Variable
+
+```json
+"Increment_Counter": {
+ "type": "IncrementVariable",
+ "runAfter": {},
+ "inputs": {
+ "name": "counter",
+ "value": 1
+ }
+}
+```
+
+> Use `IncrementVariable` (not `SetVariable` with `add()`) for counters inside loops —
+> it is atomic and avoids expression errors when the variable is used elsewhere in the
+> same iteration. `value` can be any integer or expression, e.g. `@mul(item()?['Interval'], 60)`
+> to advance a Unix timestamp cursor by N minutes.
+
+---
+
+## Control Flow
+
+### Condition (If/Else)
+
+```json
+"Check_Status": {
+ "type": "If",
+ "runAfter": {},
+ "expression": {
+ "and": [{ "equals": ["@item()?['Status']", "Active"] }]
+ },
+ "actions": {
+ "Handle_Active": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "Active user: @{item()?['Name']}"
+ }
+ },
+ "else": {
+ "actions": {
+ "Handle_Inactive": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "Inactive user"
+ }
+ }
+ }
+}
+```
+
+Comparison operators: `equals`, `not`, `greater`, `greaterOrEquals`, `less`, `lessOrEquals`, `contains`
+Logical: `and: [...]`, `or: [...]`
+
+---
+
+### Switch
+
+```json
+"Route_By_Type": {
+ "type": "Switch",
+ "runAfter": {},
+ "expression": "@triggerBody()?['type']",
+ "cases": {
+ "Case_Email": {
+ "case": "email",
+ "actions": { "Process_Email": { "type": "Compose", "runAfter": {}, "inputs": "email" } }
+ },
+ "Case_Teams": {
+ "case": "teams",
+ "actions": { "Process_Teams": { "type": "Compose", "runAfter": {}, "inputs": "teams" } }
+ }
+ },
+ "default": {
+ "actions": { "Unknown_Type": { "type": "Compose", "runAfter": {}, "inputs": "unknown" } }
+ }
+}
+```
+
+---
+
+### Scope (Grouping / Try-Catch)
+
+Wrap related actions in a Scope to give them a shared name, collapse them in the
+designer, and — most importantly — handle their errors as a unit.
+
+```json
+"Scope_Get_Customer": {
+ "type": "Scope",
+ "runAfter": {},
+ "actions": {
+ "HTTP_Get_Customer": {
+ "type": "Http",
+ "runAfter": {},
+ "inputs": {
+ "method": "GET",
+ "uri": "https://api.example.com/customers/@{variables('customerId')}"
+ }
+ },
+ "Compose_Email": {
+ "type": "Compose",
+ "runAfter": { "HTTP_Get_Customer": ["Succeeded"] },
+ "inputs": "@outputs('HTTP_Get_Customer')?['body/email']"
+ }
+ }
+},
+"Handle_Scope_Error": {
+ "type": "Compose",
+ "runAfter": { "Scope_Get_Customer": ["Failed", "TimedOut"] },
+ "inputs": "Scope failed: @{result('Scope_Get_Customer')?[0]?['error']?['message']}"
+}
+```
+
+> Reference scope results: `@result('Scope_Get_Customer')` returns an array of action
+> outcomes. Use `runAfter: {"MyScope": ["Failed", "TimedOut"]}` on a follow-up action
+> to create try/catch semantics without a Terminate.
+
+---
+
+### Foreach (Sequential)
+
+```json
+"Process_Each_Item": {
+ "type": "Foreach",
+ "runAfter": {},
+ "foreach": "@outputs('Get_Items')?['body/value']",
+ "operationOptions": "Sequential",
+ "actions": {
+ "Handle_Item": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "@item()?['Title']"
+ }
+ }
+}
+```
+
+> Always include `"operationOptions": "Sequential"` unless parallel is intentional.
+
+---
+
+### Foreach (Parallel with Concurrency Limit)
+
+```json
+"Process_Each_Item_Parallel": {
+ "type": "Foreach",
+ "runAfter": {},
+ "foreach": "@body('Get_SP_Items')?['value']",
+ "runtimeConfiguration": {
+ "concurrency": {
+ "repetitions": 20
+ }
+ },
+ "actions": {
+ "HTTP_Upsert": {
+ "type": "Http",
+ "runAfter": {},
+ "inputs": {
+ "method": "POST",
+ "uri": "https://api.example.com/contacts/@{item()?['Email']}"
+ }
+ }
+ }
+}
+```
+
+> Set `repetitions` to control how many items are processed simultaneously.
+> Practical values: `5–10` for external API calls (respect rate limits),
+> `20–50` for internal/fast operations.
+> Omit `runtimeConfiguration.concurrency` entirely for the platform default
+> (currently 50). Do NOT use `"operationOptions": "Sequential"` and concurrency together.
+
+---
+
+### Wait (Delay)
+
+```json
+"Delay_10_Minutes": {
+ "type": "Wait",
+ "runAfter": {},
+ "inputs": {
+ "interval": {
+ "count": 10,
+ "unit": "Minute"
+ }
+ }
+}
+```
+
+Valid `unit` values: `"Second"`, `"Minute"`, `"Hour"`, `"Day"`
+
+> Use a Delay + re-fetch as a deduplication guard: wait for any competing process
+> to complete, then re-read the record before acting. This avoids double-processing
+> when multiple triggers or manual edits can race on the same item.
+
+---
+
+### Terminate (Success or Failure)
+
+```json
+"Terminate_Success": {
+ "type": "Terminate",
+ "runAfter": {},
+ "inputs": {
+ "runStatus": "Succeeded"
+ }
+},
+"Terminate_Failure": {
+ "type": "Terminate",
+ "runAfter": { "Risky_Action": ["Failed"] },
+ "inputs": {
+ "runStatus": "Failed",
+ "runError": {
+ "code": "StepFailed",
+ "message": "@{outputs('Get_Error_Message')}"
+ }
+ }
+}
+```
+
+---
+
+### Do Until (Loop Until Condition)
+
+Repeats a block of actions until an exit condition becomes true.
+Use when the number of iterations is not known upfront (e.g. paginating an API,
+walking a time range, polling until a status changes).
+
+```json
+"Do_Until_Done": {
+ "type": "Until",
+ "runAfter": {},
+ "expression": "@greaterOrEquals(variables('cursor'), variables('endValue'))",
+ "limit": {
+ "count": 5000,
+ "timeout": "PT5H"
+ },
+ "actions": {
+ "Do_Work": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "@variables('cursor')"
+ },
+ "Advance_Cursor": {
+ "type": "IncrementVariable",
+ "runAfter": { "Do_Work": ["Succeeded"] },
+ "inputs": {
+ "name": "cursor",
+ "value": 1
+ }
+ }
+ }
+}
+```
+
+> Always set `limit.count` and `limit.timeout` explicitly — the platform defaults are
+> low (60 iterations, 1 hour). For time-range walkers use `limit.count: 5000` and
+> `limit.timeout: "PT5H"` (ISO 8601 duration).
+>
+> The exit condition is evaluated **before** each iteration. Initialise your cursor
+> variable before the loop so the condition can evaluate correctly on the first pass.
+
+---
+
+### Async Polling with RequestId Correlation
+
+When an API starts a long-running job asynchronously (e.g. Power BI dataset refresh,
+report generation, batch export), the trigger call returns a request ID. Capture it
+from the **response header**, then poll a status endpoint filtering by that exact ID:
+
+```json
+"Start_Job": {
+ "type": "Http",
+ "inputs": { "method": "POST", "uri": "https://api.example.com/jobs" }
+},
+"Capture_Request_ID": {
+ "type": "Compose",
+ "runAfter": { "Start_Job": ["Succeeded"] },
+ "inputs": "@outputs('Start_Job')?['headers/X-Request-Id']"
+},
+"Initialize_Status": {
+ "type": "InitializeVariable",
+ "inputs": { "variables": [{ "name": "jobStatus", "type": "String", "value": "Running" }] }
+},
+"Poll_Until_Done": {
+ "type": "Until",
+ "expression": "@not(equals(variables('jobStatus'), 'Running'))",
+ "limit": { "count": 60, "timeout": "PT30M" },
+ "actions": {
+ "Delay": { "type": "Wait", "inputs": { "interval": { "count": 20, "unit": "Second" } } },
+ "Get_History": {
+ "type": "Http",
+ "runAfter": { "Delay": ["Succeeded"] },
+ "inputs": { "method": "GET", "uri": "https://api.example.com/jobs/history" }
+ },
+ "Filter_This_Job": {
+ "type": "Query",
+ "runAfter": { "Get_History": ["Succeeded"] },
+ "inputs": {
+ "from": "@outputs('Get_History')?['body/items']",
+ "where": "@equals(item()?['requestId'], outputs('Capture_Request_ID'))"
+ }
+ },
+ "Set_Status": {
+ "type": "SetVariable",
+ "runAfter": { "Filter_This_Job": ["Succeeded"] },
+ "inputs": {
+ "name": "jobStatus",
+ "value": "@first(body('Filter_This_Job'))?['status']"
+ }
+ }
+ }
+},
+"Handle_Failure": {
+ "type": "If",
+ "runAfter": { "Poll_Until_Done": ["Succeeded"] },
+ "expression": { "equals": ["@variables('jobStatus')", "Failed"] },
+ "actions": { "Terminate_Failed": { "type": "Terminate", "inputs": { "runStatus": "Failed" } } },
+ "else": { "actions": {} }
+}
+```
+
+Access response headers: `@outputs('Start_Job')?['headers/X-Request-Id']`
+
+> **Status variable initialisation**: set a sentinel value (`"Running"`, `"Unknown"`) before
+> the loop. The exit condition tests for any value other than the sentinel.
+> This way an empty poll result (job not yet in history) leaves the variable unchanged
+> and the loop continues — it doesn't accidentally exit on null.
+>
+> **Filter before extracting**: always `Filter Array` the history to your specific
+> request ID before calling `first()`. History endpoints return all jobs; without
+> filtering, status from a different concurrent job can corrupt your poll.
+
+---
+
+### runAfter Fallback (Failed → Alternative Action)
+
+Route to a fallback action when a primary action fails β without a Condition block.
+Simply set `runAfter` on the fallback to accept `["Failed"]` from the primary:
+
+```json
+"HTTP_Get_Hi_Res": {
+ "type": "Http",
+ "runAfter": {},
+ "inputs": { "method": "GET", "uri": "https://api.example.com/data?resolution=hi-res" }
+},
+"HTTP_Get_Low_Res": {
+ "type": "Http",
+ "runAfter": { "HTTP_Get_Hi_Res": ["Failed"] },
+ "inputs": { "method": "GET", "uri": "https://api.example.com/data?resolution=low-res" }
+}
+```
+
+> Actions that follow can use `runAfter` accepting both `["Succeeded", "Skipped"]` to
+> handle either path — see **Fan-In Join Gate** below.
+
+---
+
+### Fan-In Join Gate (Merge Two Mutually Exclusive Branches)
+
+When two branches are mutually exclusive (only one can succeed per run), use a single
+downstream action that accepts `["Succeeded", "Skipped"]` from **both** branches.
+The gate fires exactly once regardless of which branch ran:
+
+```json
+"Increment_Count": {
+ "type": "IncrementVariable",
+ "runAfter": {
+ "Update_Hi_Res_Metadata": ["Succeeded", "Skipped"],
+ "Update_Low_Res_Metadata": ["Succeeded", "Skipped"]
+ },
+ "inputs": { "name": "LoopCount", "value": 1 }
+}
+```
+
+> This avoids duplicating the downstream action in each branch. The key insight:
+> whichever branch was skipped reports `Skipped` — the gate accepts that state and
+> fires once. Only works cleanly when the two branches are truly mutually exclusive
+> (e.g. one is `runAfter: [...Failed]` of the other).
+
+---
+
+## Expressions
+
+### Common Expression Patterns
+
+```
+Null-safe field access: @item()?['FieldName']
+Null guard: @coalesce(item()?['Name'], 'Unknown')
+String format: @{variables('firstName')} @{variables('lastName')}
+Date today: @utcNow()
+Formatted date: @formatDateTime(utcNow(), 'dd/MM/yyyy')
+Add days: @addDays(utcNow(), 7)
+Array length: @length(variables('myArray'))
+Filter array: Use the "Filter array" action (no inline filter expression exists in PA)
+Union (new wins): @union(body('New_Data'), outputs('Old_Data'))
+Sort: @sort(variables('myArray'), 'Date')
+Unix timestamp β date: @formatDateTime(addseconds('1970-1-1', triggerBody()?['created']), 'yyyy-MM-dd')
+Date β Unix milliseconds: @div(sub(ticks(startOfDay(item()?['Created'])), ticks(formatDateTime('1970-01-01Z','o'))), 10000)
+Date β Unix seconds: @div(sub(ticks(item()?['Start']), ticks('1970-01-01T00:00:00Z')), 10000000)
+Unix seconds β datetime: @addSeconds('1970-01-01T00:00:00Z', int(variables('Unix')))
+Coalesce as no-else: @coalesce(outputs('Optional_Step'), outputs('Default_Step'))
+Flow elapsed minutes: @div(float(sub(ticks(utcNow()), ticks(outputs('Flow_Start')))), 600000000)
+HH:mm time string: @formatDateTime(outputs('Local_Datetime'), 'HH:mm')
+Response header: @outputs('HTTP_Action')?['headers/X-Request-Id']
+Array max (by field): @reverse(sort(body('Select_Items'), 'Date'))[0]
+Integer day span: @int(split(dateDifference(outputs('Start'), outputs('End')), '.')[0])
+ISO week number: @div(add(dayofyear(addDays(subtractFromTime(date, sub(dayofweek(date),1), 'Day'), 3)), 6), 7)
+Join errors to string: @if(equals(length(variables('Errors')),0), null, concat(join(variables('Errors'),', '),' not found.'))
+Normalize before compare: @replace(coalesce(outputs('Value'),''),'_',' ')
+Robust non-empty check: @greater(length(trim(coalesce(string(outputs('Val')), ''))), 0)
+```
+
+### Newlines in Expressions
+
+> **`\n` does NOT produce a newline inside Power Automate expressions.** It is
+> treated as a literal backslash + `n` and will either appear verbatim or cause
+> a validation error.
+
+Use `decodeUriComponent('%0a')` wherever you need a newline character:
+
+```
+Newline (LF): decodeUriComponent('%0a')
+CRLF: decodeUriComponent('%0d%0a')
+```
+
+Example β multi-line Teams or email body via `concat()`:
+```json
+"Compose_Message": {
+ "type": "Compose",
+ "inputs": "@concat('Hi ', outputs('Get_User')?['body/displayName'], ',', decodeUriComponent('%0a%0a'), 'Your report is ready.', decodeUriComponent('%0a'), '- The Team')"
+}
+```
+
+Example β `join()` with newline separator:
+```json
+"Compose_List": {
+ "type": "Compose",
+ "inputs": "@join(body('Select_Names'), decodeUriComponent('%0a'))"
+}
+```
+
+> This is the only reliable way to embed newlines in dynamically built strings
+> in Power Automate flow definitions (confirmed against Logic Apps runtime).
+
+---
+
+### Sum an array (XPath trick)
+
+Power Automate has no native `sum()` function. Use XPath on XML instead:
+
+```json
+"Prepare_For_Sum": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": { "root": { "numbers": "@body('Select_Amounts')" } }
+},
+"Sum": {
+ "type": "Compose",
+ "runAfter": { "Prepare_For_Sum": ["Succeeded"] },
+ "inputs": "@xpath(xml(outputs('Prepare_For_Sum')), 'sum(/root/numbers)')"
+}
+```
+
+`Select_Amounts` must output a flat array of numbers (use a **Select** action to extract a single numeric field first). The result is a number you can use directly in conditions or calculations.
+
+> This is the only way to aggregate (sum/min/max) an array without a loop in Power Automate.
diff --git a/skills/flowstudio-power-automate-build/references/action-patterns-data.md b/skills/flowstudio-power-automate-build/references/action-patterns-data.md
new file mode 100644
index 00000000..d1c652f2
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/action-patterns-data.md
@@ -0,0 +1,735 @@
+# FlowStudio MCP — Action Patterns: Data Transforms
+
+Array operations, HTTP calls, parsing, and data transformation patterns.
+
+> All examples assume `"runAfter"` is set appropriately.
+> `<CONNECTION_NAME>` is the **key** in `connectionReferences` (e.g. `shared_sharepointonline`), not the GUID.
+> The GUID goes in the map value's `connectionName` property.
+
+---
+
+## Array Operations
+
+### Select (Reshape / Project an Array)
+
+Transforms each item in an array, keeping only the columns you need or renaming them.
+Avoids carrying large objects through the rest of the flow.
+
+```json
+"Select_Needed_Columns": {
+ "type": "Select",
+ "runAfter": {},
+ "inputs": {
+ "from": "@outputs('HTTP_Get_Subscriptions')?['body/data']",
+ "select": {
+ "id": "@item()?['id']",
+ "status": "@item()?['status']",
+ "trial_end": "@item()?['trial_end']",
+ "cancel_at": "@item()?['cancel_at']",
+ "interval": "@item()?['plan']?['interval']"
+ }
+ }
+}
+```
+
+Result reference: `@body('Select_Needed_Columns')` — returns a direct array of reshaped objects.
+
+> Use Select before looping or filtering to reduce payload size and simplify
+> downstream expressions. Works on any array — SP results, HTTP responses, variables.
+>
+> **Tips:**
+> - **Single-to-array coercion:** When an API returns a single object but you need
+> Select (which requires an array), wrap it: `@array(body('Get_Employee')?['data'])`.
+> The output is a 1-element array — access results via `?[0]?['field']`.
+> - **Null-normalize optional fields:** Use `@if(empty(item()?['field']), null, item()?['field'])`
+> on every optional field to normalize empty strings, missing properties, and empty
+> objects to explicit `null`. Ensures consistent downstream `@equals(..., @null)` checks.
+> - **Flatten nested objects:** Project nested properties into flat fields:
+> ```
+> "manager_name": "@if(empty(item()?['manager']?['name']), null, item()?['manager']?['name'])"
+> ```
+> This enables direct field-level comparison with a flat schema from another source.
+
+---
+
+### Filter Array (Query)
+
+Filters an array to items matching a condition. Use the action form (not the `filter()`
+expression) for complex multi-condition logic — it's clearer and easier to maintain.
+
+```json
+"Filter_Active_Subscriptions": {
+ "type": "Query",
+ "runAfter": {},
+ "inputs": {
+ "from": "@body('Select_Needed_Columns')",
+ "where": "@and(or(equals(item().status, 'trialing'), equals(item().status, 'active')), equals(item().cancel_at, null))"
+ }
+}
+```
+
+Result reference: `@body('Filter_Active_Subscriptions')` — direct filtered array.
+
+> Tip: run multiple Filter Array actions on the same source array to create
+> named buckets (e.g. active, being-canceled, fully-canceled), then use
+> `coalesce(first(body('Filter_A')), first(body('Filter_B')), ...)` to pick
+> the highest-priority match without any loops.
+
+---
+
+### Create CSV Table (Array → CSV String)
+
+Converts an array of objects into a CSV-formatted string — no connector call, no code.
+Use after a `Select` or `Filter Array` to export data or pass it to a file-write action.
+
+```json
+"Create_CSV": {
+ "type": "Table",
+ "runAfter": {},
+ "inputs": {
+ "from": "@body('Select_Output_Columns')",
+ "format": "CSV"
+ }
+}
+```
+
+Result reference: `@body('Create_CSV')` — a plain string with header row + data rows.
+
+```json
+// Custom column order / renamed headers:
+"Create_CSV_Custom": {
+ "type": "Table",
+ "inputs": {
+ "from": "@body('Select_Output_Columns')",
+ "format": "CSV",
+ "columns": [
+ { "header": "Date", "value": "@item()?['transactionDate']" },
+ { "header": "Amount", "value": "@item()?['amount']" },
+ { "header": "Description", "value": "@item()?['description']" }
+ ]
+ }
+}
+```
+
+> Without `columns`, headers are taken from the object property names in the source array.
+> With `columns`, you control header names and column order explicitly.
+>
+> The output is a raw string. Write it to a file with `CreateFile` or `UpdateFile`
+> (set `body` to `@body('Create_CSV')`), or store in a variable with `SetVariable`.
+>
+> If source data came from Power BI's `ExecuteDatasetQuery`, column names will be
+> wrapped in square brackets (e.g. `[Amount]`). Strip them before writing:
+> `@replace(replace(body('Create_CSV'),'[',''),']','')`
+
+---
+
+### range() + Select for Array Generation
+
+`range(0, N)` produces an integer sequence `[0, 1, 2, …, N-1]`. Pipe it through
+a Select action to generate date series, index grids, or any computed array
+without a loop:
+
+```json
+// Generate 14 consecutive dates starting from a base date
+"Generate_Date_Series": {
+ "type": "Select",
+ "inputs": {
+ "from": "@range(0, 14)",
+ "select": "@addDays(outputs('Base_Date'), item(), 'yyyy-MM-dd')"
+ }
+}
+```
+
+Result: `@body('Generate_Date_Series')` → `["2025-01-06", "2025-01-07", …, "2025-01-19"]`
+
+```json
+// Flatten a 2D array (rows × cols) into 1D using arithmetic indexing
+"Flatten_Grid": {
+ "type": "Select",
+ "inputs": {
+ "from": "@range(0, mul(length(outputs('Rows')), length(outputs('Cols'))))",
+ "select": {
+ "row": "@outputs('Rows')[div(item(), length(outputs('Cols')))]",
+ "col": "@outputs('Cols')[mod(item(), length(outputs('Cols')))]"
+ }
+ }
+}
+```
+
+> `range()` is zero-based. The Cartesian product pattern above uses `div(i, cols)`
+> for the row index and `mod(i, cols)` for the column index — equivalent to a
+> nested for-loop flattened into a single pass. Useful for generating time-slot ×
+> date grids, shift × location assignments, etc.
+
+---
+
+### Dynamic Dictionary via json(concat(join()))
+
+When you need O(1) keyβvalue lookups at runtime and Power Automate has no native
+dictionary type, build one from an array using Select + join + json:
+
+```json
+"Build_Key_Value_Pairs": {
+ "type": "Select",
+ "inputs": {
+ "from": "@body('Get_Lookup_Items')?['value']",
+ "select": "@concat('\"', item()?['Key'], '\":\"', item()?['Value'], '\"')"
+ }
+},
+"Assemble_Dictionary": {
+ "type": "Compose",
+ "inputs": "@json(concat('{', join(body('Build_Key_Value_Pairs'), ','), '}'))"
+}
+```
+
+Lookup: `@outputs('Assemble_Dictionary')?['myKey']`
+
+```json
+// Practical example: date β rate-code lookup for business rules
+"Build_Holiday_Rates": {
+ "type": "Select",
+ "inputs": {
+ "from": "@body('Get_Holidays')?['value']",
+ "select": "@concat('\"', formatDateTime(item()?['Date'], 'yyyy-MM-dd'), '\":\"', item()?['RateCode'], '\"')"
+ }
+},
+"Holiday_Dict": {
+ "type": "Compose",
+ "inputs": "@json(concat('{', join(body('Build_Holiday_Rates'), ','), '}'))"
+}
+```
+
+Then inside a loop: `@coalesce(outputs('Holiday_Dict')?[item()?['Date']], 'Standard')`
+
+> The `json(concat('{', join(...), '}'))` pattern works for string values. For numeric
+> or boolean values, omit the inner escaped quotes around the value portion.
+> Keys must be unique β duplicate keys silently overwrite earlier ones.
+> This replaces deeply nested `if(equals(key,'A'),'X', if(equals(key,'B'),'Y', ...))` chains.
+
+---
+
+### union() for Changed-Field Detection
+
+When you need to find records where *any* of several fields has changed, run one
+`Filter Array` per field and `union()` the results. This avoids a complex
+multi-condition filter and produces a clean deduplicated set:
+
+```json
+"Filter_Name_Changed": {
+ "type": "Query",
+ "inputs": { "from": "@body('Existing_Records')",
+ "where": "@not(equals(item()?['name'], item()?['dest_name']))" }
+},
+"Filter_Status_Changed": {
+ "type": "Query",
+ "inputs": { "from": "@body('Existing_Records')",
+ "where": "@not(equals(item()?['status'], item()?['dest_status']))" }
+},
+"All_Changed": {
+ "type": "Compose",
+ "inputs": "@union(body('Filter_Name_Changed'), body('Filter_Status_Changed'))"
+}
+```
+
+Reference: `@outputs('All_Changed')` — deduplicated array of rows where anything changed.
+
+> `union()` deduplicates by object identity, so a row that changed in both fields
+> appears once. Add more `Filter_*_Changed` inputs to `union()` as needed:
+> `@union(body('F1'), body('F2'), body('F3'))`
+
+---
+
+### File-Content Change Gate
+
+Before running expensive processing on a file or blob, compare its current content
+to a stored baseline. Skip entirely if nothing has changed — makes sync flows
+idempotent and safe to re-run or schedule aggressively.
+
+```json
+"Get_File_From_Source": { ... },
+"Get_Stored_Baseline": { ... },
+"Condition_File_Changed": {
+ "type": "If",
+ "expression": {
+ "not": {
+ "equals": [
+ "@base64(body('Get_File_From_Source'))",
+ "@body('Get_Stored_Baseline')"
+ ]
+ }
+ },
+ "actions": {
+ "Update_Baseline": { "...": "overwrite stored copy with new content" },
+ "Process_File": { "...": "all expensive work goes here" }
+ },
+ "else": { "actions": {} }
+}
+```
+
+> Store the baseline as a file in SharePoint or blob storage — `base64()`-encode the
+> live content before comparing so binary and text files are handled uniformly.
+> Write the new baseline **before** processing so a re-run after a partial failure
+> does not re-process the same file again.
+
+---
+
+### Set-Join for Sync (Update Detection without Nested Loops)
+
+When syncing a source collection into a destination (e.g. API response → SharePoint list,
+CSV → database), avoid nested `Apply to each` loops to find changed records.
+Instead, **project flat key arrays** and use `contains()` to perform set operations —
+zero nested loops, and the final loop only touches changed items.
+
+**Full insert/update/delete sync pattern:**
+
+```json
+// Step 1 — Project a flat key array from the DESTINATION (e.g. SharePoint)
+"Select_Dest_Keys": {
+ "type": "Select",
+ "inputs": {
+ "from": "@outputs('Get_Dest_Items')?['body/value']",
+ "select": "@item()?['Title']"
+ }
+}
+// → ["KEY1", "KEY2", "KEY3", ...]
+
+// Step 2 — INSERT: source rows whose key is NOT in destination
+"Filter_To_Insert": {
+ "type": "Query",
+ "inputs": {
+ "from": "@body('Source_Array')",
+ "where": "@not(contains(body('Select_Dest_Keys'), item()?['key']))"
+ }
+}
+// → Apply to each Filter_To_Insert → CreateItem
+
+// Step 3 — INNER JOIN: source rows that exist in destination
+"Filter_Already_Exists": {
+ "type": "Query",
+ "inputs": {
+ "from": "@body('Source_Array')",
+ "where": "@contains(body('Select_Dest_Keys'), item()?['key'])"
+ }
+}
+
+// Step 4 — UPDATE: one Filter per tracked field, then union them
+"Filter_Field1_Changed": {
+ "type": "Query",
+ "inputs": {
+ "from": "@body('Filter_Already_Exists')",
+ "where": "@not(equals(item()?['field1'], item()?['dest_field1']))"
+ }
+}
+"Filter_Field2_Changed": {
+ "type": "Query",
+ "inputs": {
+ "from": "@body('Filter_Already_Exists')",
+ "where": "@not(equals(item()?['field2'], item()?['dest_field2']))"
+ }
+}
+"Union_Changed": {
+ "type": "Compose",
+ "inputs": "@union(body('Filter_Field1_Changed'), body('Filter_Field2_Changed'))"
+}
+// → rows where ANY tracked field differs
+
+// Step 5 — Resolve destination IDs for changed rows (no nested loop)
+"Select_Changed_Keys": {
+ "type": "Select",
+ "inputs": { "from": "@outputs('Union_Changed')", "select": "@item()?['key']" }
+}
+"Filter_Dest_Items_To_Update": {
+ "type": "Query",
+ "inputs": {
+ "from": "@outputs('Get_Dest_Items')?['body/value']",
+ "where": "@contains(body('Select_Changed_Keys'), item()?['Title'])"
+ }
+}
+// Step 6 — Single loop over changed items only
+"Apply_to_each_Update": {
+ "type": "Foreach",
+ "foreach": "@body('Filter_Dest_Items_To_Update')",
+ "actions": {
+ "Get_Source_Row": {
+ "type": "Query",
+ "inputs": {
+ "from": "@outputs('Union_Changed')",
+ "where": "@equals(item()?['key'], items('Apply_to_each_Update')?['Title'])"
+ }
+ },
+ "Update_Item": {
+ "...": "...",
+ "id": "@items('Apply_to_each_Update')?['ID']",
+ "item/field1": "@first(body('Get_Source_Row'))?['field1']"
+ }
+ }
+}
+
+// Step 7 — DELETE: destination keys NOT in source
+"Select_Source_Keys": {
+ "type": "Select",
+ "inputs": { "from": "@body('Source_Array')", "select": "@item()?['key']" }
+}
+"Filter_To_Delete": {
+ "type": "Query",
+ "inputs": {
+ "from": "@outputs('Get_Dest_Items')?['body/value']",
+ "where": "@not(contains(body('Select_Source_Keys'), item()?['Title']))"
+ }
+}
+// → Apply to each Filter_To_Delete → DeleteItem
+```
+
+> **Why this beats nested loops**: the naive approach (for each dest item, scan source)
+> is O(n × m) and hits Power Automate's 100k-action run limit fast on large lists.
+> This pattern is O(n + m): one pass to build key arrays, one pass per filter.
+> The update loop in Step 6 only iterates *changed* records — often a tiny fraction
+> of the full collection. Run Steps 2/4/7 in **parallel Scopes** for further speed.
+
+---
+
+### First-or-Null Single-Row Lookup
+
+Use `first()` on the result array to extract one record without a loop.
+Then null-check the output to guard downstream actions.
+
+```json
+"Get_First_Match": {
+ "type": "Compose",
+ "runAfter": { "Get_SP_Items": ["Succeeded"] },
+ "inputs": "@first(outputs('Get_SP_Items')?['body/value'])"
+}
+```
+
+In a Condition, test for no-match with the **`@null` literal** (not `empty()`):
+
+```json
+"Condition": {
+ "type": "If",
+ "expression": {
+ "not": {
+ "equals": [
+ "@outputs('Get_First_Match')",
+ "@null"
+ ]
+ }
+ }
+}
+```
+
+Access fields on the matched row: `@outputs('Get_First_Match')?['FieldName']`
+
+> Use this instead of `Apply to each` when you only need one matching record.
+> `first()` on an empty array returns `null`; `empty()` is for arrays/strings,
+> not scalars — using it on a `first()` result causes a runtime error.
+
+---
+
+## HTTP & Parsing
+
+### HTTP Action (External API)
+
+```json
+"Call_External_API": {
+ "type": "Http",
+ "runAfter": {},
+ "inputs": {
+ "method": "POST",
+ "uri": "https://api.example.com/endpoint",
+ "headers": {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer @{variables('apiToken')}"
+ },
+ "body": {
+ "data": "@outputs('Compose_Payload')"
+ },
+ "retryPolicy": {
+ "type": "Fixed",
+ "count": 3,
+ "interval": "PT10S"
+ }
+ }
+}
+```
+
+Response reference: `@outputs('Call_External_API')?['body']`
+
+#### Variant: ActiveDirectoryOAuth (Service-to-Service)
+
+For calling APIs that require Azure AD client-credentials (e.g., Microsoft Graph),
+use in-line OAuth instead of a Bearer token variable:
+
+```json
+"Call_Graph_API": {
+ "type": "Http",
+ "runAfter": {},
+ "inputs": {
+ "method": "GET",
+ "uri": "https://graph.microsoft.com/v1.0/users?$search=\"employeeId:@{variables('Code')}\"&$select=id,displayName",
+ "headers": {
+ "Content-Type": "application/json",
+ "ConsistencyLevel": "eventual"
+ },
+ "authentication": {
+ "type": "ActiveDirectoryOAuth",
+ "authority": "https://login.microsoftonline.com",
+ "tenant": "",
+ "audience": "https://graph.microsoft.com",
+ "clientId": "",
+ "secret": "@parameters('graphClientSecret')"
+ }
+ }
+}
+```
+
+> **When to use:** Calling Microsoft Graph, Azure Resource Manager, or any
+> Azure AD-protected API from a flow without a premium connector.
+>
+> The `authentication` block handles the entire OAuth client-credentials flow
+> transparently — no manual token acquisition step needed.
+>
+> `ConsistencyLevel: eventual` is required for Graph `$search` queries.
+> Without it, `$search` returns 400.
+>
+> For PATCH/PUT writes, the same `authentication` block works — just change
+> `method` and add a `body`.
+>
+> ⚠️ **Never hardcode `secret` inline.** Use `@parameters('graphClientSecret')`
+> and declare it in the flow's `parameters` block (type `securestring`). This
+> prevents the secret from appearing in run history or being readable via
+> `get_live_flow`. Declare the parameter like:
+> ```json
+> "parameters": {
+> "graphClientSecret": { "type": "securestring", "defaultValue": "" }
+> }
+> ```
+> Then pass the real value via the flow's connections or environment variables
+> — never commit it to source control.
+
+---
+
+### HTTP Response (Return to Caller)
+
+Used in HTTP-triggered flows to send a structured reply back to the caller.
+Must run before the flow times out (default 2 min for synchronous HTTP).
+
+```json
+"Response": {
+ "type": "Response",
+ "runAfter": {},
+ "inputs": {
+ "statusCode": 200,
+ "headers": {
+ "Content-Type": "application/json"
+ },
+ "body": {
+ "status": "success",
+ "message": "@{outputs('Compose_Result')}"
+ }
+ }
+}
+```
+
+> **PowerApps / low-code caller pattern**: always return `statusCode: 200` with a
+> `status` field in the body (`"success"` / `"error"`). PowerApps HTTP actions
+> do not handle non-2xx responses gracefully — the caller should inspect
+> `body.status` rather than the HTTP status code.
+>
+> Use multiple Response actions — one per branch — so each path returns
+> an appropriate message. Only one will execute per run.
+
+---
+
+### Child Flow Call (Parent→Child via HTTP POST)
+
+Power Automate supports parent→child orchestration by calling a child flow's
+HTTP trigger URL directly. The parent sends an HTTP POST and blocks until the
+child returns a `Response` action. The child flow uses a `manual` (Request) trigger.
+
+```json
+// PARENT — call child flow and wait for its response
+"Call_Child_Flow": {
+ "type": "Http",
+ "inputs": {
+ "method": "POST",
+ "uri": "https://prod-XX.australiasoutheast.logic.azure.com:443/workflows//triggers/manual/paths/invoke?api-version=2016-06-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=",
+ "headers": { "Content-Type": "application/json" },
+ "body": {
+ "ID": "@triggerBody()?['ID']",
+ "WeekEnd": "@triggerBody()?['WeekEnd']",
+ "Payload": "@variables('dataArray')"
+ },
+ "retryPolicy": { "type": "none" }
+ },
+ "operationOptions": "DisableAsyncPattern",
+ "runtimeConfiguration": {
+ "contentTransfer": { "transferMode": "Chunked" }
+ },
+ "limit": { "timeout": "PT2H" }
+}
+```
+
+```json
+// CHILD — manual trigger receives the JSON body
+// (trigger definition)
+"manual": {
+ "type": "Request",
+ "kind": "Http",
+ "inputs": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "ID": { "type": "string" },
+ "WeekEnd": { "type": "string" },
+ "Payload": { "type": "array" }
+ }
+ }
+ }
+}
+
+// CHILD — return result to parent
+"Response_Success": {
+ "type": "Response",
+ "inputs": {
+ "statusCode": 200,
+ "headers": { "Content-Type": "application/json" },
+ "body": { "Result": "Success", "Count": "@length(variables('processed'))" }
+ }
+}
+```
+
+> **`retryPolicy: none`** — critical on the parent's HTTP call. Without it, a child
+> flow timeout triggers retries, spawning duplicate child runs.
+>
+> **`DisableAsyncPattern`** — prevents the parent from treating a 202 Accepted as
+> completion. The parent will block until the child sends its `Response`.
+>
+> **`transferMode: Chunked`** — enable when passing large arrays (>100 KB) to the child;
+> avoids request-size limits.
+>
+> **`limit.timeout: PT2H`** — raise the default 2-minute HTTP timeout for long-running
+> children. Max is PT24H.
+>
+> The child flow's trigger URL contains a SAS token (`sig=...`) that authenticates
+> the call. Copy it from the child flow's trigger properties panel. The URL changes
+> if the trigger is deleted and re-created.
+
+---
+
+### Parse JSON
+
+```json
+"Parse_Response": {
+ "type": "ParseJson",
+ "runAfter": {},
+ "inputs": {
+ "content": "@outputs('Call_External_API')?['body']",
+ "schema": {
+ "type": "object",
+ "properties": {
+ "id": { "type": "integer" },
+ "name": { "type": "string" },
+ "items": {
+ "type": "array",
+ "items": { "type": "object" }
+ }
+ }
+ }
+ }
+}
+```
+
+Access parsed values: `@body('Parse_Response')?['name']`
+
+---
+
+### Manual CSV → JSON (No Premium Action)
+
+Parse a raw CSV string into an array of objects using only built-in expressions.
+Avoids the premium "Parse CSV" connector action.
+
+```json
+"Delimiter": {
+ "type": "Compose",
+ "inputs": ","
+},
+"Strip_Quotes": {
+ "type": "Compose",
+ "inputs": "@replace(body('Get_File_Content'), '\"', '')"
+},
+"Detect_Line_Ending": {
+ "type": "Compose",
+ "inputs": "@if(equals(indexOf(outputs('Strip_Quotes'), decodeUriComponent('%0D%0A')), -1), if(equals(indexOf(outputs('Strip_Quotes'), decodeUriComponent('%0A')), -1), decodeUriComponent('%0D'), decodeUriComponent('%0A')), decodeUriComponent('%0D%0A'))"
+},
+"Headers": {
+ "type": "Compose",
+ "inputs": "@split(first(split(outputs('Strip_Quotes'), outputs('Detect_Line_Ending'))), outputs('Delimiter'))"
+},
+"Data_Rows": {
+ "type": "Compose",
+ "inputs": "@skip(split(outputs('Strip_Quotes'), outputs('Detect_Line_Ending')), 1)"
+},
+"Select_CSV_Body": {
+ "type": "Select",
+ "inputs": {
+ "from": "@outputs('Data_Rows')",
+ "select": {
+ "@{outputs('Headers')[0]}": "@split(item(), outputs('Delimiter'))[0]",
+ "@{outputs('Headers')[1]}": "@split(item(), outputs('Delimiter'))[1]",
+ "@{outputs('Headers')[2]}": "@split(item(), outputs('Delimiter'))[2]"
+ }
+ }
+},
+"Filter_Empty_Rows": {
+ "type": "Query",
+ "inputs": {
+ "from": "@body('Select_CSV_Body')",
+ "where": "@not(equals(item()?[outputs('Headers')[0]], null))"
+ }
+}
+```
+
+Result: `@body('Filter_Empty_Rows')` — array of objects with header names as keys.
+
+> **`Detect_Line_Ending`** handles CRLF (Windows), LF (Unix), and CR (old Mac) automatically
+> using `indexOf()` with `decodeUriComponent('%0D%0A' / '%0A' / '%0D')`.
+>
+> **Dynamic key names in `Select`**: `@{outputs('Headers')[0]}` as a JSON key in a
+> `Select` shape sets the output property name at runtime from the header row —
+> this works as long as the expression is in `@{...}` interpolation syntax.
+>
+> **Columns with embedded commas**: if field values can contain the delimiter,
+> use `length(split(row, ','))` in a Switch to detect the column count and manually
+> reassemble the split fragments: `@concat(split(item(),',')[1],',',split(item(),',')[2])`
+
+---
+
+### ConvertTimeZone (Built-in, No Connector)
+
+Converts a timestamp between timezones with no API call or connector licence cost.
+Format string `"g"` produces short locale date+time (`M/d/yyyy h:mm tt`).
+
+```json
+"Convert_to_Local_Time": {
+ "type": "Expression",
+ "kind": "ConvertTimeZone",
+ "runAfter": {},
+ "inputs": {
+ "baseTime": "@{outputs('UTC_Timestamp')}",
+ "sourceTimeZone": "UTC",
+ "destinationTimeZone": "Taipei Standard Time",
+ "formatString": "g"
+ }
+}
+```
+
+Result reference: `@body('Convert_to_Local_Time')` — **not** `outputs()`, unlike most actions.
+
+Common `formatString` values: `"g"` (short), `"f"` (full), `"yyyy-MM-dd"`, `"HH:mm"`
+
+Common timezone strings: `"UTC"`, `"AUS Eastern Standard Time"`, `"Taipei Standard Time"`,
+`"Singapore Standard Time"`, `"GMT Standard Time"`
+
+> This is `type: Expression, kind: ConvertTimeZone` — a built-in Logic Apps action,
+> not a connector. No connection reference needed. Reference the output via
+> `body()` (not `outputs()`), otherwise the expression returns null.
diff --git a/skills/flowstudio-power-automate-build/references/build-patterns.md b/skills/flowstudio-power-automate-build/references/build-patterns.md
new file mode 100644
index 00000000..b50b10af
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/build-patterns.md
@@ -0,0 +1,108 @@
+# Common Build Patterns
+
+Complete flow definition templates ready to copy and customize.
+
+---
+
+## Pattern: Recurrence + SharePoint list read + Teams notification
+
+```json
+{
+ "triggers": {
+ "Recurrence": {
+ "type": "Recurrence",
+ "recurrence": { "frequency": "Day", "interval": 1,
+ "startTime": "2026-01-01T08:00:00Z",
+ "timeZone": "AUS Eastern Standard Time" }
+ }
+ },
+ "actions": {
+ "Get_SP_Items": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "shared_sharepointonline",
+ "operationId": "GetItems"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList",
+ "$filter": "Status eq 'Active'",
+ "$top": 500
+ }
+ }
+ },
+ "Apply_To_Each": {
+ "type": "Foreach",
+ "runAfter": { "Get_SP_Items": ["Succeeded"] },
+ "foreach": "@outputs('Get_SP_Items')?['body/value']",
+ "actions": {
+ "Post_Teams_Message": {
+ "type": "OpenApiConnection",
+ "runAfter": {},
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_teams",
+ "connectionName": "shared_teams",
+ "operationId": "PostMessageToConversation"
+ },
+ "parameters": {
+ "poster": "Flow bot",
+ "location": "Channel",
+ "body/recipient": {
+ "groupId": "",
+ "channelId": ""
+ },
+ "body/messageBody": "Item: @{items('Apply_To_Each')?['Title']}"
+ }
+ }
+ }
+ },
+ "operationOptions": "Sequential"
+ }
+ }
+}
+```
+
+---
+
+## Pattern: HTTP trigger (webhook / Power App call)
+
+```json
+{
+ "triggers": {
+ "manual": {
+ "type": "Request",
+ "kind": "Http",
+ "inputs": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" },
+ "value": { "type": "number" }
+ }
+ }
+ }
+ }
+ },
+ "actions": {
+ "Compose_Response": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "Received: @{triggerBody()?['name']} = @{triggerBody()?['value']}"
+ },
+ "Response": {
+ "type": "Response",
+ "runAfter": { "Compose_Response": ["Succeeded"] },
+ "inputs": {
+ "statusCode": 200,
+ "body": { "status": "ok", "message": "@{outputs('Compose_Response')}" }
+ }
+ }
+ }
+}
+```
+
+Access body values: `@triggerBody()?['name']`
diff --git a/skills/flowstudio-power-automate-build/references/flow-schema.md b/skills/flowstudio-power-automate-build/references/flow-schema.md
new file mode 100644
index 00000000..02210e0a
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/flow-schema.md
@@ -0,0 +1,225 @@
+# FlowStudio MCP — Flow Definition Schema
+
+The full JSON structure expected by `update_live_flow` (and returned by `get_live_flow`).
+
+---
+
+## Top-Level Shape
+
+```json
+{
+ "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
+ "contentVersion": "1.0.0.0",
+ "parameters": {
+ "$connections": {
+ "defaultValue": {},
+ "type": "Object"
+ }
+ },
+  "triggers": {
+    "<trigger_name>": { ... }
+  },
+  "actions": {
+    "<action_name>": { ... }
+  },
+ "outputs": {}
+}
+```
+
+---
+
+## `triggers`
+
+Exactly one trigger per flow definition. The key name is arbitrary but
+conventional names are used (e.g. `Recurrence`, `manual`, `When_a_new_email_arrives`).
+
+See [trigger-types.md](trigger-types.md) for all trigger templates.
+
+---
+
+## `actions`
+
+Dictionary of action definitions keyed by unique action name.
+Key names may not contain spaces — use underscores.
+
+Each action must include:
+- `type` — action type identifier
+- `runAfter` — map of upstream action names → status conditions array
+- `inputs` — action-specific input configuration
+
+See [action-patterns-core.md](action-patterns-core.md), [action-patterns-data.md](action-patterns-data.md),
+and [action-patterns-connectors.md](action-patterns-connectors.md) for templates.
+
+### Optional Action Properties
+
+Beyond the required `type`, `runAfter`, and `inputs`, actions can include:
+
+| Property | Purpose |
+|---|---|
+| `runtimeConfiguration` | Pagination, concurrency, secure data, chunked transfer |
+| `operationOptions` | `"Sequential"` for Foreach, `"DisableAsyncPattern"` for HTTP |
+| `limit` | Timeout override (e.g. `{"timeout": "PT2H"}`) |
+
+#### `runtimeConfiguration` Variants
+
+**Pagination** (SharePoint Get Items with large lists):
+```json
+"runtimeConfiguration": {
+ "paginationPolicy": {
+ "minimumItemCount": 5000
+ }
+}
+```
+> Without this, Get Items silently caps at 256 results. Set `minimumItemCount`
+> to the maximum rows you expect. Required for any SharePoint list over 256 items.
+
+**Concurrency** (parallel Foreach):
+```json
+"runtimeConfiguration": {
+ "concurrency": {
+ "repetitions": 20
+ }
+}
+```
+
+**Secure inputs/outputs** (mask values in run history):
+```json
+"runtimeConfiguration": {
+ "secureData": {
+ "properties": ["inputs", "outputs"]
+ }
+}
+```
+> Use on actions that handle credentials, tokens, or PII. Masked values show
+> as a "value not shown" placeholder in the flow run history UI and API responses.
+
+**Chunked transfer** (large HTTP payloads):
+```json
+"runtimeConfiguration": {
+ "contentTransfer": {
+ "transferMode": "Chunked"
+ }
+}
+```
+> Enable on HTTP actions sending or receiving bodies >100 KB (e.g. parent→child
+> flow calls with large arrays).
+
+---
+
+## `runAfter` Rules
+
+The first action in a branch has `"runAfter": {}` (empty — runs after trigger).
+
+Subsequent actions declare their dependency:
+
+```json
+"My_Action": {
+ "runAfter": {
+ "Previous_Action": ["Succeeded"]
+ }
+}
+```
+
+Multiple upstream dependencies:
+```json
+"runAfter": {
+ "Action_A": ["Succeeded"],
+ "Action_B": ["Succeeded", "Skipped"]
+}
+```
+
+Error-handling action (runs when upstream failed):
+```json
+"Log_Error": {
+ "runAfter": {
+ "Risky_Action": ["Failed"]
+ }
+}
+```
+
+---
+
+## `parameters` (Flow-Level Input Parameters)
+
+Optional. Define reusable values at the flow level:
+
+```json
+"parameters": {
+ "listName": {
+ "type": "string",
+ "defaultValue": "MyList"
+ },
+ "maxItems": {
+ "type": "integer",
+ "defaultValue": 100
+ }
+}
+```
+
+Reference: `@parameters('listName')` in expression strings.
+
+---
+
+## `outputs`
+
+Rarely used in cloud flows. Leave as `{}` unless the flow is called
+as a child flow and needs to return values.
+
+For child flows that return data:
+
+```json
+"outputs": {
+ "resultData": {
+ "type": "object",
+ "value": "@outputs('Compose_Result')"
+ }
+}
+```
+
+---
+
+## Scoped Actions (Inside Scope Block)
+
+Actions that need to be grouped for error handling or clarity:
+
+```json
+"Scope_Main_Process": {
+ "type": "Scope",
+ "runAfter": {},
+ "actions": {
+ "Step_One": { ... },
+ "Step_Two": { "runAfter": { "Step_One": ["Succeeded"] }, ... }
+ }
+}
+```
+
+---
+
+## Full Minimal Example
+
+```json
+{
+ "$schema": "https://schema.management.azure.com/providers/Microsoft.Logic/schemas/2016-06-01/workflowdefinition.json#",
+ "contentVersion": "1.0.0.0",
+ "triggers": {
+ "Recurrence": {
+ "type": "Recurrence",
+ "recurrence": {
+ "frequency": "Week",
+ "interval": 1,
+ "schedule": { "weekDays": ["Monday"] },
+ "startTime": "2026-01-05T09:00:00Z",
+ "timeZone": "AUS Eastern Standard Time"
+ }
+ }
+ },
+ "actions": {
+ "Compose_Greeting": {
+ "type": "Compose",
+ "runAfter": {},
+ "inputs": "Good Monday!"
+ }
+ },
+ "outputs": {}
+}
+```
diff --git a/skills/flowstudio-power-automate-build/references/trigger-types.md b/skills/flowstudio-power-automate-build/references/trigger-types.md
new file mode 100644
index 00000000..6065f1fa
--- /dev/null
+++ b/skills/flowstudio-power-automate-build/references/trigger-types.md
@@ -0,0 +1,211 @@
+# FlowStudio MCP — Trigger Types
+
+Copy-paste trigger definitions for Power Automate flow definitions.
+
+---
+
+## Recurrence
+
+Run on a schedule.
+
+```json
+"Recurrence": {
+ "type": "Recurrence",
+ "recurrence": {
+ "frequency": "Day",
+ "interval": 1,
+ "startTime": "2026-01-01T08:00:00Z",
+ "timeZone": "AUS Eastern Standard Time"
+ }
+}
+```
+
+Weekly on specific days:
+```json
+"Recurrence": {
+ "type": "Recurrence",
+ "recurrence": {
+ "frequency": "Week",
+ "interval": 1,
+ "schedule": {
+ "weekDays": ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
+ },
+ "startTime": "2026-01-05T09:00:00Z",
+ "timeZone": "AUS Eastern Standard Time"
+ }
+}
+```
+
+Common `timeZone` values:
+- `"AUS Eastern Standard Time"` — Sydney/Melbourne (UTC+10/+11)
+- `"UTC"` — Universal time
+- `"E. Australia Standard Time"` — Brisbane (UTC+10 no DST)
+- `"New Zealand Standard Time"` — Auckland (UTC+12/+13)
+- `"Pacific Standard Time"` — Los Angeles (UTC-8/-7)
+- `"GMT Standard Time"` — London (UTC+0/+1)
+
+---
+
+## Manual (HTTP Request / Power Apps)
+
+Receive an HTTP POST with a JSON body.
+
+```json
+"manual": {
+ "type": "Request",
+ "kind": "Http",
+ "inputs": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" },
+ "value": { "type": "integer" }
+ },
+ "required": ["name"]
+ }
+ }
+}
+```
+
+Access values: `@triggerBody()?['name']`
+Trigger URL available after saving: `@listCallbackUrl()`
+
+#### No-Schema Variant (Accept Arbitrary JSON)
+
+When the incoming payload structure is unknown or varies, omit the schema
+to accept any valid JSON body without validation:
+
+```json
+"manual": {
+ "type": "Request",
+ "kind": "Http",
+ "inputs": {
+ "schema": {}
+ }
+}
+```
+
+Access any field dynamically: `@triggerBody()?['anyField']`
+
+> Use this for external webhooks (Stripe, GitHub, Employment Hero, etc.) where the
+> payload shape may change or is not fully documented. The flow accepts any
+> JSON without returning 400 for unexpected properties.
+
+---
+
+## Automated (SharePoint Item Created)
+
+```json
+"When_an_item_is_created": {
+ "type": "OpenApiConnectionNotification",
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "OnNewItem"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList"
+ },
+ "subscribe": {
+ "body": { "notificationUrl": "@listCallbackUrl()" },
+ "queries": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList"
+ }
+ }
+ }
+}
+```
+
+Access trigger data: `@triggerBody()?['ID']`, `@triggerBody()?['Title']`, etc.
+
+---
+
+## Automated (SharePoint Item Modified)
+
+```json
+"When_an_existing_item_is_modified": {
+ "type": "OpenApiConnectionNotification",
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "connectionName": "",
+ "operationId": "OnUpdatedItem"
+ },
+ "parameters": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList"
+ },
+ "subscribe": {
+ "body": { "notificationUrl": "@listCallbackUrl()" },
+ "queries": {
+ "dataset": "https://mytenant.sharepoint.com/sites/mysite",
+ "table": "MyList"
+ }
+ }
+ }
+}
+```
+
+---
+
+## Automated (Outlook: When New Email Arrives)
+
+```json
+"When_a_new_email_arrives": {
+ "type": "OpenApiConnectionNotification",
+ "inputs": {
+ "host": {
+ "apiId": "/providers/Microsoft.PowerApps/apis/shared_office365",
+ "connectionName": "",
+ "operationId": "OnNewEmail"
+ },
+ "parameters": {
+ "folderId": "Inbox",
+ "to": "monitored@contoso.com",
+ "isHTML": true
+ },
+ "subscribe": {
+ "body": { "notificationUrl": "@listCallbackUrl()" }
+ }
+ }
+}
+```
+
+---
+
+## Child Flow (Called by Another Flow)
+
+```json
+"manual": {
+ "type": "Request",
+ "kind": "Button",
+ "inputs": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "items": { "type": "object" }
+ }
+ }
+ }
+ }
+}
+```
+
+Access parent-supplied data: `@triggerBody()?['items']`
+
+To return data to the parent, add a `Response` action:
+```json
+"Respond_to_Parent": {
+ "type": "Response",
+ "runAfter": { "Compose_Result": ["Succeeded"] },
+ "inputs": {
+ "statusCode": 200,
+ "body": "@outputs('Compose_Result')"
+ }
+}
+```
diff --git a/skills/flowstudio-power-automate-debug/SKILL.md b/skills/flowstudio-power-automate-debug/SKILL.md
new file mode 100644
index 00000000..964ca349
--- /dev/null
+++ b/skills/flowstudio-power-automate-debug/SKILL.md
@@ -0,0 +1,322 @@
+---
+name: flowstudio-power-automate-debug
+description: >-
+ Debug failing Power Automate cloud flows using the FlowStudio MCP server.
+ Load this skill when asked to: debug a flow, investigate a failed run, why is
+ this flow failing, inspect action outputs, find the root cause of a flow error,
+ fix a broken Power Automate flow, diagnose a timeout, trace a DynamicOperationRequestFailure,
+ check connector auth errors, read error details from a run, or troubleshoot
+  expression failures. Requires a FlowStudio MCP subscription — see https://mcp.flowstudio.app
+---
+
+# Power Automate Debugging with FlowStudio MCP
+
+A step-by-step diagnostic process for investigating failing Power Automate
+cloud flows through the FlowStudio MCP server.
+
+**Prerequisite**: A FlowStudio MCP server must be reachable with a valid JWT.
+See the `flowstudio-power-automate-mcp` skill for connection setup.
+Subscribe at https://mcp.flowstudio.app
+
+---
+
+## Source of Truth
+
+> **Always call `tools/list` first** to confirm available tool names and their
+> parameter schemas. Tool names and parameters may change between server versions.
+> This skill covers response shapes, behavioral notes, and diagnostic patterns —
+> things `tools/list` cannot tell you. If this document disagrees with `tools/list`
+> or a real API response, the API wins.
+
+---
+
+## Python Helper
+
+```python
+import json, urllib.request
+
+MCP_URL = "https://mcp.flowstudio.app/mcp"
+MCP_TOKEN = ""
+
+def mcp(tool, **kwargs):
+ payload = json.dumps({"jsonrpc": "2.0", "id": 1, "method": "tools/call",
+ "params": {"name": tool, "arguments": kwargs}}).encode()
+ req = urllib.request.Request(MCP_URL, data=payload,
+ headers={"x-api-key": MCP_TOKEN, "Content-Type": "application/json",
+ "User-Agent": "FlowStudio-MCP/1.0"})
+ try:
+ resp = urllib.request.urlopen(req, timeout=120)
+ except urllib.error.HTTPError as e:
+ body = e.read().decode("utf-8", errors="replace")
+ raise RuntimeError(f"MCP HTTP {e.code}: {body[:200]}") from e
+ raw = json.loads(resp.read())
+ if "error" in raw:
+ raise RuntimeError(f"MCP error: {json.dumps(raw['error'])}")
+ return json.loads(raw["result"]["content"][0]["text"])
+
+ENV = "" # e.g. Default-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+```
+
+---
+
+## FlowStudio for Teams: Fast-Path Diagnosis (Skip Steps 2–4)
+
+If you have a FlowStudio for Teams subscription, `get_store_flow_errors`
+returns per-run failure data including action names and remediation hints
+in a single call — no need to walk through live API steps.
+
+```python
+# Quick failure summary
+summary = mcp("get_store_flow_summary", environmentName=ENV, flowName=FLOW_ID)
+# {"totalRuns": 100, "failRuns": 10, "failRate": 0.1,
+# "averageDurationSeconds": 29.4, "maxDurationSeconds": 158.9,
+# "firstFailRunRemediation": ""}
+print(f"Fail rate: {summary['failRate']:.0%} over {summary['totalRuns']} runs")
+
+# Per-run error details (requires active monitoring to be configured)
+errors = mcp("get_store_flow_errors", environmentName=ENV, flowName=FLOW_ID)
+if errors:
+ for r in errors[:3]:
+ print(r["startTime"], "|", r.get("failedActions"), "|", r.get("remediationHint"))
+    # If errors confirms the failing action → jump to Step 6 (apply fix)
+else:
+    # Store doesn't have run-level detail for this flow → use live tools (Steps 2–5)
+    pass
+```
+
+For the full governance record (description, complexity, tier, connector list):
+```python
+record = mcp("get_store_flow", environmentName=ENV, flowName=FLOW_ID)
+# {"displayName": "My Flow", "state": "Started",
+# "runPeriodTotal": 100, "runPeriodFailRate": 0.1, "runPeriodFails": 10,
+# "runPeriodDurationAverage": 29410.8, ← milliseconds
+# "runError": "{\"code\": \"EACCES\", ...}", ← JSON string, parse it
+# "description": "...", "tier": "Premium", "complexity": "{...}"}
+if record.get("runError"):
+ last_err = json.loads(record["runError"])
+ print("Last run error:", last_err)
+```
+
+---
+
+## Step 1 β Locate the Flow
+
+```python
+result = mcp("list_live_flows", environmentName=ENV)
+# Returns a wrapper object: {mode, flows, totalCount, error}
+target = next(f for f in result["flows"] if "My Flow Name" in f["displayName"])
+FLOW_ID = target["id"] # plain UUID — use directly as flowName
+print(FLOW_ID)
+```
+
+---
+
+## Step 2 β Find the Failing Run
+
+```python
+runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=5)
+# Returns direct array (newest first):
+# [{"name": "08584296068667933411438594643CU15",
+# "status": "Failed",
+# "startTime": "2026-02-25T06:13:38.6910688Z",
+# "endTime": "2026-02-25T06:15:24.1995008Z",
+# "triggerName": "manual",
+# "error": {"code": "ActionFailed", "message": "An action failed..."}},
+# {"name": "...", "status": "Succeeded", "error": null, ...}]
+
+for r in runs:
+ print(r["name"], r["status"], r["startTime"])
+
+RUN_ID = next(r["name"] for r in runs if r["status"] == "Failed")
+```
+
+---
+
+## Step 3 β Get the Top-Level Error
+
+```python
+err = mcp("get_live_flow_run_error",
+ environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID)
+# Returns:
+# {
+# "runName": "08584296068667933411438594643CU15",
+# "failedActions": [
+# {"actionName": "Apply_to_each_prepare_workers", "status": "Failed",
+# "error": {"code": "ActionFailed", "message": "An action failed..."},
+# "startTime": "...", "endTime": "..."},
+# {"actionName": "HTTP_find_AD_User_by_Name", "status": "Failed",
+# "code": "NotSpecified", "startTime": "...", "endTime": "..."}
+# ],
+# "allActions": [
+# {"actionName": "Apply_to_each", "status": "Skipped"},
+# {"actionName": "Compose_WeekEnd", "status": "Succeeded"},
+# ...
+# ]
+# }
+
+# failedActions is ordered outer-to-inner. The ROOT cause is the LAST entry:
+root = err["failedActions"][-1]
+print(f"Root action: {root['actionName']} — code: {root.get('code')}")
+
+# allActions shows every action's status — useful for spotting what was Skipped
+# See common-errors.md to decode the error code.
+```
+
+---
+
+## Step 4 β Read the Flow Definition
+
+```python
+defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+actions = defn["properties"]["definition"]["actions"]
+print(list(actions.keys()))
+```
+
+Find the failing action in the definition. Inspect its `inputs` expression
+to understand what data it expects.
+
+---
+
+## Step 5 β Inspect Action Outputs (Walk Back from Failure)
+
+For each action **leading up to** the failure, inspect its runtime output:
+
+```python
+for action_name in ["Compose_WeekEnd", "HTTP_Get_Data", "Parse_JSON"]:
+ result = mcp("get_live_flow_run_action_outputs",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ runName=RUN_ID,
+ actionName=action_name)
+    # Returns an array — single-element when actionName is provided
+ out = result[0] if result else {}
+ print(action_name, out.get("status"))
+ print(json.dumps(out.get("outputs", {}), indent=2)[:500])
+```
+
+> ⚠️ Output payloads from array-processing actions can be very large.
+> Always slice (e.g. `[:500]`) before printing.
+
+---
+
+## Step 6 β Pinpoint the Root Cause
+
+### Expression Errors (e.g. `split` on null)
+If the error mentions `InvalidTemplate` or a function name:
+1. Find the action in the definition
+2. Check what upstream action/expression it reads
+3. Inspect that upstream action's output for null / missing fields
+
+```python
+# Example: action uses split(item()?['Name'], ' ')
+# β null Name in the source data
+result = mcp("get_live_flow_run_action_outputs", ..., actionName="Compose_Names")
+# Returns a single-element array; index [0] to get the action object
+if not result:
+ print("No outputs returned for Compose_Names")
+ names = []
+else:
+ names = result[0].get("outputs", {}).get("body") or []
+nulls = [x for x in names if x.get("Name") is None]
+print(f"{len(nulls)} records with null Name")
+```
+
+### Wrong Field Path
+Expression `triggerBody()?['fieldName']` returns null → `fieldName` is wrong.
+Check the trigger output shape with:
+```python
+mcp("get_live_flow_run_action_outputs", ..., actionName="")
+```
+
+### Connection / Auth Failures
+Look for `ConnectionAuthorizationFailed` — the connection owner must match the
+service account running the flow. Cannot fix via API; fix in PA designer.
+
+---
+
+## Step 7 β Apply the Fix
+
+**For expression/data issues**:
+```python
+defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+acts = defn["properties"]["definition"]["actions"]
+
+# Example: fix split on potentially-null Name
+acts["Compose_Names"]["inputs"] = \
+ "@coalesce(item()?['Name'], 'Unknown')"
+
+conn_refs = defn["properties"]["connectionReferences"]
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ definition=defn["properties"]["definition"],
+ connectionReferences=conn_refs)
+
+print(result.get("error")) # None = success
+```
+
+> ⚠️ `update_live_flow` always returns an `error` key.
+> A value of `null` (Python `None`) means success.
+
+---
+
+## Step 8 β Verify the Fix
+
+```python
+# Resubmit the failed run
+resubmit = mcp("resubmit_live_flow_run",
+ environmentName=ENV, flowName=FLOW_ID, runName=RUN_ID)
+print(resubmit)
+
+# Wait ~30 s then check
+import time; time.sleep(30)
+new_runs = mcp("get_live_flow_runs", environmentName=ENV, flowName=FLOW_ID, top=3)
+print(new_runs[0]["status"]) # Succeeded = done
+```
+
+### Testing HTTP-Triggered Flows
+
+For flows with a `Request` (HTTP) trigger, use `trigger_live_flow` instead
+of `resubmit_live_flow_run` to test with custom payloads:
+
+```python
+# First inspect what the trigger expects
+schema = mcp("get_live_flow_http_schema",
+ environmentName=ENV, flowName=FLOW_ID)
+print("Expected body schema:", schema.get("triggerSchema"))
+print("Response schemas:", schema.get("responseSchemas"))
+
+# Trigger with a test payload
+result = mcp("trigger_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ body={"name": "Test User", "value": 42})
+print(f"Status: {result['status']}, Body: {result.get('body')}")
+```
+
+> `trigger_live_flow` handles AAD-authenticated triggers automatically.
+> Only works for flows with a `Request` (HTTP) trigger type.
+
+---
+
+## Quick-Reference Diagnostic Decision Tree
+
+| Symptom | First Tool to Call | What to Look For |
+|---|---|---|
+| Flow shows as Failed | `get_live_flow_run_error` | `failedActions[-1]["actionName"]` = root cause |
+| Expression crash | `get_live_flow_run_action_outputs` on prior action | null / wrong-type fields in output body |
+| Flow never starts | `get_live_flow` | check `properties.state` = "Started" |
+| Action returns wrong data | `get_live_flow_run_action_outputs` | actual output body vs expected |
+| Fix applied but still fails | `get_live_flow_runs` after resubmit | new run `status` field |
+
+---
+
+## Reference Files
+
+- [common-errors.md](references/common-errors.md) — Error codes, likely causes, and fixes
+- [debug-workflow.md](references/debug-workflow.md) — Full decision tree for complex failures
+
+## Related Skills
+
+- `flowstudio-power-automate-mcp` — Core connection setup and operation reference
+- `flowstudio-power-automate-build` — Build and deploy new flows
diff --git a/skills/flowstudio-power-automate-debug/references/common-errors.md b/skills/flowstudio-power-automate-debug/references/common-errors.md
new file mode 100644
index 00000000..bd879b4f
--- /dev/null
+++ b/skills/flowstudio-power-automate-debug/references/common-errors.md
@@ -0,0 +1,188 @@
+# FlowStudio MCP — Common Power Automate Errors
+
+Reference for error codes, likely causes, and recommended fixes when debugging
+Power Automate flows via the FlowStudio MCP server.
+
+---
+
+## Expression / Template Errors
+
+### `InvalidTemplate` β Function Applied to Null
+
+**Full message pattern**: `"Unable to process template language expressions... function 'split' expects its first argument 'text' to be of type string"`
+
+**Root cause**: An expression like `@split(item()?['Name'], ' ')` received a null value.
+
+**Diagnosis**:
+1. Note the action name in the error message
+2. Call `get_live_flow_run_action_outputs` on the action that produces the array
+3. Find items where `Name` (or the referenced field) is `null`
+
+**Fixes**:
+```
+Before: @split(item()?['Name'], ' ')
+After: @split(coalesce(item()?['Name'], ''), ' ')
+
+Or guard the whole foreach body with a condition:
+ expression: "@not(empty(item()?['Name']))"
+```
+
+---
+
+### `InvalidTemplate` β Wrong Expression Path
+
+**Full message pattern**: `"Unable to process template language expressions... 'triggerBody()?['FieldName']' is of type 'Null'"`
+
+**Root cause**: The field name in the expression doesn't match the actual payload schema.
+
+**Diagnosis**:
+```python
+# Check trigger output shape
+mcp("get_live_flow_run_action_outputs",
+    {"environmentName": ENV, "flowName": FLOW_ID, "runName": RUN_ID,
+     "actionName": ""})
+# Compare actual keys vs expression
+```
+
+**Fix**: Update expression to use the correct key name. Common mismatches:
+- `triggerBody()?['body']` vs `triggerBody()?['Body']` (case-sensitive)
+- `triggerBody()?['Subject']` vs `triggerOutputs()?['body/Subject']`
+
+---
+
+### `InvalidTemplate` β Type Mismatch
+
+**Full message pattern**: `"... expected type 'Array' but got type 'Object'"`
+
+**Root cause**: Passing an object where the expression expects an array (e.g. a single item HTTP response vs a list response).
+
+**Fix**:
+```
+Before: @outputs('HTTP')?['body']
+After: @outputs('HTTP')?['body/value'] β for OData list responses
+ @createArray(outputs('HTTP')?['body']) β wrap single object in array
+```
+
+---
+
+## Connection / Auth Errors
+
+### `ConnectionAuthorizationFailed`
+
+**Full message**: `"The API connection ... is not authorized."`
+
+**Root cause**: The connection referenced in the flow is owned by a different
+user/service account than the one whose JWT is being used.
+
+**Diagnosis**: Check `properties.connectionReferences` β the `connectionName` GUID
+identifies the owner. Cannot be fixed via API.
+
+**Fix options**:
+1. Open flow in Power Automate designer β re-authenticate the connection
+2. Use a connection owned by the service account whose token you hold
+3. Share the connection with the service account in PA admin
+
+---
+
+### `InvalidConnectionCredentials`
+
+**Root cause**: The underlying OAuth token for the connection has expired or
+the user's credentials changed.
+
+**Fix**: Owner must sign in to Power Automate and refresh the connection.
+
+---
+
+## HTTP Action Errors
+
+### `ActionFailed` β HTTP 4xx/5xx
+
+**Full message pattern**: `"An HTTP request to... failed with status code '400'"`
+
+**Diagnosis**:
+```python
+actions_out = mcp("get_live_flow_run_action_outputs",
+                  {"environmentName": ENV, "flowName": FLOW_ID,
+                   "runName": RUN_ID, "actionName": "HTTP_My_Call"})
+item = actions_out[0] # first entry in the returned array
+print(item["outputs"]["statusCode"]) # 400, 401, 403, 500...
+print(item["outputs"]["body"]) # error details from target API
+```
+
+**Common causes**:
+- 401 β missing or expired auth header
+- 403 β permission denied on target resource
+- 404 β wrong URL / resource deleted
+- 400 β malformed JSON body (check expression that builds the body)
+
+---
+
+### `ActionFailed` β HTTP Timeout
+
+**Root cause**: Target endpoint did not respond within the connector's timeout
+(default 90 s for HTTP action).
+
+**Fix**: Add retry policy to the HTTP action, or split the payload into smaller
+batches to reduce per-request processing time.
+
+---
+
+## Control Flow Errors
+
+### `ActionSkipped` Instead of Running
+
+**Root cause**: The `runAfter` condition wasn't met. E.g. an action set to
+`runAfter: { "Prev": ["Succeeded"] }` won't run if `Prev` failed or was skipped.
+
+**Diagnosis**: Check the preceding action's status. Deliberately skipped
+(e.g. inside a false branch) is intentional β unexpected skip is a logic gap.
+
+**Fix**: Add `"Failed"` or `"Skipped"` to the `runAfter` status array if the
+action should run on those outcomes too.
+
+---
+
+### Foreach Runs in Wrong Order / Race Condition
+
+**Root cause**: `Foreach` without `operationOptions: "Sequential"` runs
+iterations in parallel, causing write conflicts or undefined ordering.
+
+**Fix**: Add `"operationOptions": "Sequential"` to the Foreach action.
+
+---
+
+## Update / Deploy Errors
+
+### `update_live_flow` Returns No-Op
+
+**Symptom**: `result["updated"]` is empty list or `result["created"]` is empty.
+
+**Likely cause**: Passing wrong parameter name. The required key is `definition`
+(object), not `flowDefinition` or `body`.
+
+---
+
+### `update_live_flow` β `"Supply connectionReferences"`
+
+**Root cause**: The definition contains `OpenApiConnection` or
+`OpenApiConnectionWebhook` actions but `connectionReferences` was not passed.
+
+**Fix**: Fetch the existing connection references with `get_live_flow` and pass
+them as the `connectionReferences` argument.
+
+---
+
+## Data Logic Errors
+
+### `union()` Overriding Correct Records with Nulls
+
+**Symptom**: After merging two arrays, some records have null fields that existed
+in one of the source arrays.
+
+**Root cause**: `union(old_data, new_data)` β `union()` first-wins, so old_data
+values override new_data for matching records.
+
+**Fix**: Swap argument order: `union(new_data, old_data)`
+
+```
+Before: @sort(union(outputs('Old_Array'), body('New_Array')), 'Date')
+After: @sort(union(body('New_Array'), outputs('Old_Array')), 'Date')
+```
diff --git a/skills/flowstudio-power-automate-debug/references/debug-workflow.md b/skills/flowstudio-power-automate-debug/references/debug-workflow.md
new file mode 100644
index 00000000..c28d86d1
--- /dev/null
+++ b/skills/flowstudio-power-automate-debug/references/debug-workflow.md
@@ -0,0 +1,157 @@
+# FlowStudio MCP — Debug Workflow
+
+End-to-end decision tree for diagnosing Power Automate flow failures.
+
+---
+
+## Top-Level Decision Tree
+
+```
+Flow is failing
+β
+βββ Flow never starts / no runs appear
+β βββ βΊ Check flow State: get_live_flow β properties.state
+β βββ "Stopped" β flow is disabled; enable in PA designer
+β βββ "Started" + no runs β trigger condition not met (check trigger config)
+β
+βββ Flow run shows "Failed"
+β βββ Step A: get_live_flow_run_error β read error.code + error.message
+β β
+β βββ error.code = "InvalidTemplate"
+β β βββ βΊ Expression error (null value, wrong type, bad path)
+β β βββ See: Expression Error Workflow below
+β β
+β βββ error.code = "ConnectionAuthorizationFailed"
+β β βββ βΊ Connection owned by different user; fix in PA designer
+β β
+β βββ error.code = "ActionFailed" + message mentions HTTP
+β β βββ βΊ See: HTTP Action Workflow below
+β β
+β βββ Unknown / generic error
+β βββ βΊ Walk actions backwards (Step B below)
+β
+βββ Flow Succeeds but output is wrong
+ βββ βΊ Inspect intermediate actions with get_live_flow_run_action_outputs
+ βββ See: Data Quality Workflow below
+```
+
+---
+
+## Expression Error Workflow
+
+```
+InvalidTemplate error
+β
+βββ 1. Read error.message β identifies the action name and function
+β
+βββ 2. Get flow definition: get_live_flow
+β βββ Find that action in defn["properties"]["definition"]["actions"][action_name]["inputs"]
+β βββ Identify what upstream value the expression reads
+β
+βββ 3. get_live_flow_run_action_outputs for the action BEFORE the failing one
+β βββ Look for null / wrong type in that action's output
+β βββ Null string field β wrap with coalesce(): @coalesce(field, '')
+β βββ Null object β add empty check condition before the action
+β βββ Wrong field name β correct the key (case-sensitive)
+β
+βββ 4. Apply fix with update_live_flow, then resubmit
+```
+
+---
+
+## HTTP Action Workflow
+
+```
+ActionFailed on HTTP action
+β
+βββ 1. get_live_flow_run_action_outputs on the HTTP action
+β βββ Read: outputs.statusCode, outputs.body
+β
+βββ statusCode = 401
+β βββ βΊ Auth header missing or expired OAuth token
+β Check: action inputs.authentication block
+β
+βββ statusCode = 403
+β βββ βΊ Insufficient permission on target resource
+β Check: service principal / user has access
+β
+βββ statusCode = 400
+β βββ βΊ Malformed request body
+β Check: action inputs.body expression; parse errors often in nested JSON
+β
+βββ statusCode = 404
+β βββ βΊ Wrong URL or resource deleted/renamed
+β Check: action inputs.uri expression
+β
+βββ statusCode = 500 / timeout
+ βββ βΊ Target system error; retry policy may help
+ Add: "retryPolicy": {"type": "Fixed", "count": 3, "interval": "PT10S"}
+```
+
+---
+
+## Data Quality Workflow
+
+```
+Flow succeeds but output data is wrong
+β
+βββ 1. Identify the first "wrong" output β which action produces it?
+β
+βββ 2. get_live_flow_run_action_outputs on that action
+β βββ Compare actual output body vs expected
+β
+βββ Source array has nulls / unexpected values
+β βββ Check the trigger data β get_live_flow_run_action_outputs on trigger
+β βββ Trace forward action by action until the value corrupts
+β
+βββ Merge/union has wrong values
+β βββ Check union argument order:
+β union(NEW, old) = new wins β
+β union(OLD, new) = old wins β common bug
+β
+βββ Foreach output missing items
+β βββ Check foreach condition β filter may be too strict
+β βββ Check if parallel foreach caused race condition (add Sequential)
+β
+βββ Date/time values wrong timezone
+ βββ Use convertTimeZone() β utcNow() is always UTC
+```
+
+---
+
+## Walk-Back Analysis (Unknown Failure)
+
+When the error message doesn't clearly name a root cause:
+
+```python
+# 1. Get all action names from definition
+defn = mcp("get_live_flow", environmentName=ENV, flowName=FLOW_ID)
+actions = list(defn["properties"]["definition"]["actions"].keys())
+
+# 2. Check status of each action in the failed run
+for action in actions:
+ actions_out = mcp("get_live_flow_run_action_outputs",
+ {"environmentName": ENV, "flowName": FLOW_ID,
+ "runName": RUN_ID, "actionName": action})
+ # Returns an array of action objects
+ item = actions_out[0] if actions_out else {}
+ status = item.get("status", "unknown")
+ print(f"{action}: {status}")
+
+# 3. Find the boundary between Succeeded and Failed/Skipped
+# The first Failed action is likely the root cause (unless skipped by design)
+```
+
+Actions inside Foreach / Condition branches may appear nested β
+check the parent action first to confirm the branch ran at all.
+
+---
+
+## Post-Fix Verification Checklist
+
+1. `update_live_flow` returns `error: null` β definition accepted
+2. `resubmit_live_flow_run` confirms new run started
+3. Wait for run completion (poll `get_live_flow_runs` every 15 s)
+4. Confirm new run `status = "Succeeded"`
+5. If flow has downstream consumers (child flows, emails, SharePoint writes),
+ spot-check those too
diff --git a/skills/flowstudio-power-automate-mcp/SKILL.md b/skills/flowstudio-power-automate-mcp/SKILL.md
new file mode 100644
index 00000000..7866ed27
--- /dev/null
+++ b/skills/flowstudio-power-automate-mcp/SKILL.md
@@ -0,0 +1,450 @@
+---
+name: flowstudio-power-automate-mcp
+description: >-
+ Connect to and operate Power Automate cloud flows via a FlowStudio MCP server.
+ Use when asked to: list flows, read a flow definition, check run history, inspect
+ action outputs, resubmit a run, cancel a running flow, view connections, get a
+ trigger URL, validate a definition, monitor flow health, or any task that requires
+ talking to the Power Automate API through an MCP tool. Also use for Power Platform
+ environment discovery and connection management. Requires a FlowStudio MCP
+ subscription or compatible server β see https://mcp.flowstudio.app
+---
+
+# Power Automate via FlowStudio MCP
+
+This skill lets AI agents read, monitor, and operate Microsoft Power Automate
+cloud flows programmatically through a **FlowStudio MCP server** β no browser,
+no UI, no manual steps.
+
+> **Requires:** A [FlowStudio](https://mcp.flowstudio.app) MCP subscription (or
+> compatible Power Automate MCP server). You will need:
+> - MCP endpoint: `https://mcp.flowstudio.app/mcp` (same for all subscribers)
+> - API key / JWT token (`x-api-key` header β NOT Bearer)
+> - Power Platform environment name (e.g. `Default-<tenant-guid>`)
+
+---
+
+## Source of Truth
+
+| Priority | Source | Covers |
+|----------|--------|--------|
+| 1 | **Real API response** | Always trust what the server actually returns |
+| 2 | **`tools/list`** | Tool names, parameter names, types, required flags |
+| 3 | **SKILL docs & reference files** | Response shapes, behavioral notes, workflow recipes |
+
+> **Start every new session with `tools/list`.**
+> It returns the authoritative, up-to-date schema for every tool β parameter names,
+> types, and required flags. The SKILL docs cover what `tools/list` cannot tell you:
+> response shapes, non-obvious behaviors, and end-to-end workflow patterns.
+>
+> If any documentation disagrees with `tools/list` or a real API response,
+> the API wins.
+
+---
+
+## Recommended Language: Python or Node.js
+
+All examples in this skill and the companion build / debug skills use **Python
+with `urllib.request`** (stdlib β no `pip install` needed). **Node.js** is an
+equally valid choice: `fetch` is built-in from Node 18+, JSON handling is
+native, and the async/await model maps cleanly onto the request-response pattern
+of MCP tool calls β making it a natural fit for teams already working in a
+JavaScript/TypeScript stack.
+
+| Language | Verdict | Notes |
+|---|---|---|
+| **Python** | β Recommended | Clean JSON handling, no escaping issues, all skill examples use it |
+| **Node.js (β₯ 18)** | β Recommended | Native `fetch` + `JSON.stringify`/`JSON.parse`; async/await fits MCP call patterns well; no extra packages needed |
+| PowerShell | β οΈ Avoid for flow operations | `ConvertTo-Json -Depth` silently truncates nested definitions; quoting and escaping break complex payloads. Acceptable for a quick `tools/list` discovery call but not for building or updating flows. |
+| cURL / Bash | β οΈ Possible but fragile | Shell-escaping nested JSON is error-prone; no native JSON parser |
+
+> **TL;DR β use the Core MCP Helper (Python or Node.js) below.** Both handle
+> JSON-RPC framing, auth, and response parsing in a single reusable function.
+
+---
+
+## What You Can Do
+
+FlowStudio MCP has two access tiers. **FlowStudio for Teams** subscribers get
+both the fast Azure-table store (cached snapshot data + governance metadata) and
+full live Power Automate API access. **MCP-only subscribers** get the live tools β
+more than enough to build, debug, and operate flows.
+
+### Live Tools β Available to All MCP Subscribers
+
+| Tool | What it does |
+|---|---|
+| `list_live_flows` | List flows in an environment directly from the PA API (always current) |
+| `list_live_environments` | List all Power Platform environments visible to the service account |
+| `list_live_connections` | List all connections in an environment from the PA API |
+| `get_live_flow` | Fetch the complete flow definition (triggers, actions, parameters) |
+| `get_live_flow_http_schema` | Inspect the JSON body schema and response schemas of an HTTP-triggered flow |
+| `get_live_flow_trigger_url` | Get the current signed callback URL for an HTTP-triggered flow |
+| `trigger_live_flow` | POST to an HTTP-triggered flow's callback URL (AAD auth handled automatically) |
+| `update_live_flow` | Create a new flow or patch an existing definition in one call |
+| `add_live_flow_to_solution` | Migrate a non-solution flow into a solution |
+| `get_live_flow_runs` | List recent run history with status, start/end times, and errors |
+| `get_live_flow_run_error` | Get structured error details (per-action) for a failed run |
+| `get_live_flow_run_action_outputs` | Inspect inputs/outputs of any action (or every foreach iteration) in a run |
+| `resubmit_live_flow_run` | Re-run a failed or cancelled run using its original trigger payload |
+| `cancel_live_flow_run` | Cancel a currently running flow execution |
+
+### Store Tools β FlowStudio for Teams Subscribers Only
+
+These tools read from (and write to) the FlowStudio Azure table β a monitored
+snapshot of your tenant's flows enriched with governance metadata and run statistics.
+
+| Tool | What it does |
+|---|---|
+| `list_store_flows` | Search flows from the cache with governance flags, run failure rates, and owner metadata |
+| `get_store_flow` | Get full cached details for a single flow including run stats and governance fields |
+| `get_store_flow_trigger_url` | Get the trigger URL from the cache (instant, no PA API call) |
+| `get_store_flow_runs` | Cached run history for the last N days with duration and remediation hints |
+| `get_store_flow_errors` | Cached failed-only runs with failed action names and remediation hints |
+| `get_store_flow_summary` | Aggregated stats: success rate, failure count, avg/max duration |
+| `set_store_flow_state` | Start or stop a flow via the PA API and sync the result back to the store |
+| `update_store_flow` | Update governance metadata (description, tags, monitor flag, notification rules, business impact) |
+| `list_store_environments` | List all environments from the cache |
+| `list_store_makers` | List all makers (citizen developers) from the cache |
+| `get_store_maker` | Get a maker's flow/app counts and account status |
+| `list_store_power_apps` | List all Power Apps canvas apps from the cache |
+| `list_store_connections` | List all Power Platform connections from the cache |
+
+---
+
+## Which Tool Tier to Call First
+
+| Task | Tool | Notes |
+|---|---|---|
+| List flows | `list_live_flows` | Always current β calls PA API directly |
+| Read a definition | `get_live_flow` | Always fetched live β not cached |
+| Debug a failure | `get_live_flow_runs` β `get_live_flow_run_error` | Use live run data |
+
+> β οΈ **`list_live_flows` returns a wrapper object** with a `flows` array β access via `result["flows"]`.
+
+> Store tools (`list_store_flows`, `get_store_flow`, etc.) are available to **FlowStudio for Teams** subscribers and provide cached governance metadata. Use live tools when in doubt β they work for all subscription tiers.
+
+---
+
+## Step 0 β Discover Available Tools
+
+Always start by calling `tools/list` to confirm the server is reachable and see
+exactly which tool names are available (names may vary by server version):
+
+```python
+import json, urllib.request
+
+TOKEN = ""
+MCP = "https://mcp.flowstudio.app/mcp"
+
+def mcp_raw(method, params=None, cid=1):
+ payload = {"jsonrpc": "2.0", "method": method, "id": cid}
+ if params:
+ payload["params"] = params
+ req = urllib.request.Request(MCP, data=json.dumps(payload).encode(),
+ headers={"x-api-key": TOKEN, "Content-Type": "application/json",
+ "User-Agent": "FlowStudio-MCP/1.0"})
+ try:
+ resp = urllib.request.urlopen(req, timeout=30)
+ except urllib.error.HTTPError as e:
+ raise RuntimeError(f"MCP HTTP {e.code} β check token and endpoint") from e
+ return json.loads(resp.read())
+
+raw = mcp_raw("tools/list")
+if "error" in raw:
+ print("ERROR:", raw["error"]); raise SystemExit(1)
+for t in raw["result"]["tools"]:
+ print(t["name"], "β", t["description"][:60])
+```
+
+---
+
+## Core MCP Helper (Python)
+
+Use this helper throughout all subsequent operations:
+
+```python
+import json, urllib.request
+
+TOKEN = ""
+MCP = "https://mcp.flowstudio.app/mcp"
+
+def mcp(tool, args, cid=1):
+ payload = {"jsonrpc": "2.0", "method": "tools/call", "id": cid,
+ "params": {"name": tool, "arguments": args}}
+ req = urllib.request.Request(MCP, data=json.dumps(payload).encode(),
+ headers={"x-api-key": TOKEN, "Content-Type": "application/json",
+ "User-Agent": "FlowStudio-MCP/1.0"})
+ try:
+ resp = urllib.request.urlopen(req, timeout=120)
+ except urllib.error.HTTPError as e:
+ body = e.read().decode("utf-8", errors="replace")
+ raise RuntimeError(f"MCP HTTP {e.code}: {body[:200]}") from e
+ raw = json.loads(resp.read())
+ if "error" in raw:
+ raise RuntimeError(f"MCP error: {json.dumps(raw['error'])}")
+ text = raw["result"]["content"][0]["text"]
+ return json.loads(text)
+```
+
+> **Common auth errors:**
+> - HTTP 401/403 β token is missing, expired, or malformed. Get a fresh JWT from [mcp.flowstudio.app](https://mcp.flowstudio.app).
+> - HTTP 400 β malformed JSON-RPC payload. Check `Content-Type: application/json` and body structure.
+> - `MCP error: {"code": -32602, ...}` β wrong or missing tool arguments.
+
+---
+
+## Core MCP Helper (Node.js)
+
+Equivalent helper for Node.js 18+ (built-in `fetch` β no packages required):
+
+```js
+const TOKEN = "";
+const MCP = "https://mcp.flowstudio.app/mcp";
+
+async function mcp(tool, args, cid = 1) {
+ const payload = {
+ jsonrpc: "2.0",
+ method: "tools/call",
+ id: cid,
+ params: { name: tool, arguments: args },
+ };
+ const res = await fetch(MCP, {
+ method: "POST",
+ headers: {
+ "x-api-key": TOKEN,
+ "Content-Type": "application/json",
+ "User-Agent": "FlowStudio-MCP/1.0",
+ },
+ body: JSON.stringify(payload),
+ });
+ if (!res.ok) {
+ const body = await res.text();
+ throw new Error(`MCP HTTP ${res.status}: ${body.slice(0, 200)}`);
+ }
+ const raw = await res.json();
+ if (raw.error) throw new Error(`MCP error: ${JSON.stringify(raw.error)}`);
+ return JSON.parse(raw.result.content[0].text);
+}
+```
+
+> Requires Node.js 18+. For older Node, replace `fetch` with `https.request`
+> from the stdlib or install `node-fetch`.
+
+---
+
+## List Flows
+
+```python
+ENV = "Default-<tenant-guid>"  # find via list_live_environments
+
+result = mcp("list_live_flows", {"environmentName": ENV})
+# Returns wrapper object:
+# {"mode": "owner", "flows": [{"id": "0757041a-...", "displayName": "My Flow",
+# "state": "Started", "triggerType": "Request", ...}], "totalCount": 42, "error": null}
+for f in result["flows"]:
+ FLOW_ID = f["id"] # plain UUID β use directly as flowName
+ print(FLOW_ID, "|", f["displayName"], "|", f["state"])
+```
+
+---
+
+## Read a Flow Definition
+
+```python
+FLOW = ""
+
+flow = mcp("get_live_flow", {"environmentName": ENV, "flowName": FLOW})
+
+# Display name and state
+print(flow["properties"]["displayName"])
+print(flow["properties"]["state"])
+
+# List all action names
+actions = flow["properties"]["definition"]["actions"]
+print("Actions:", list(actions.keys()))
+
+# Inspect one action's expression
+print(actions["Compose_Filter"]["inputs"])
+```
+
+---
+
+## Check Run History
+
+```python
+# Most recent runs (newest first)
+runs = mcp("get_live_flow_runs", {"environmentName": ENV, "flowName": FLOW, "top": 5})
+# Returns direct array:
+# [{"name": "08584296068667933411438594643CU15",
+# "status": "Failed",
+# "startTime": "2026-02-25T06:13:38.6910688Z",
+# "endTime": "2026-02-25T06:15:24.1995008Z",
+# "triggerName": "manual",
+# "error": {"code": "ActionFailed", "message": "An action failed..."}},
+# {"name": "08584296028664130474944675379CU26",
+# "status": "Succeeded", "error": null, ...}]
+
+for r in runs:
+ print(r["name"], r["status"])
+
+# Get the name of the first failed run
+run_id = next((r["name"] for r in runs if r["status"] == "Failed"), None)
+```
+
+---
+
+## Inspect an Action's Output
+
+```python
+run_id = runs[0]["name"]
+
+out = mcp("get_live_flow_run_action_outputs", {
+ "environmentName": ENV,
+ "flowName": FLOW,
+ "runName": run_id,
+ "actionName": "Get_Customer_Record" # exact action name from the definition
+})
+print(json.dumps(out, indent=2))
+```
+
+---
+
+## Get a Run's Error
+
+```python
+err = mcp("get_live_flow_run_error", {
+ "environmentName": ENV,
+ "flowName": FLOW,
+ "runName": run_id
+})
+# Returns:
+# {"runName": "08584296068...",
+# "failedActions": [
+# {"actionName": "HTTP_find_AD_User_by_Name", "status": "Failed",
+# "code": "NotSpecified", "startTime": "...", "endTime": "..."},
+# {"actionName": "Scope_prepare_workers", "status": "Failed",
+# "error": {"code": "ActionFailed", "message": "An action failed..."}}
+# ],
+# "allActions": [
+# {"actionName": "Apply_to_each", "status": "Skipped"},
+# {"actionName": "Compose_WeekEnd", "status": "Succeeded"},
+# ...
+# ]}
+
+# The ROOT cause is usually the deepest entry in failedActions:
+root = err["failedActions"][-1]
+print(f"Root failure: {root['actionName']} β {root['code']}")
+```
+
+---
+
+## Resubmit a Run
+
+```python
+result = mcp("resubmit_live_flow_run", {
+ "environmentName": ENV,
+ "flowName": FLOW,
+ "runName": run_id
+})
+print(result) # {"resubmitted": true, "triggerName": "..."}
+```
+
+---
+
+## Cancel a Running Run
+
+```python
+mcp("cancel_live_flow_run", {
+ "environmentName": ENV,
+ "flowName": FLOW,
+ "runName": run_id
+})
+```
+
+> β οΈ **Do NOT cancel a run that shows `Running` because it is waiting for an
+> adaptive card response.** That status is normal β the flow is paused waiting
+> for a human to respond in Teams. Cancelling it will discard the pending card.
+
+---
+
+## Full Round-Trip Example β Debug and Fix a Failing Flow
+
+```python
+# ββ 1. Find the flow βββββββββββββββββββββββββββββββββββββββββββββββββββββ
+result = mcp("list_live_flows", {"environmentName": ENV})
+target = next(f for f in result["flows"] if "My Flow Name" in f["displayName"])
+FLOW_ID = target["id"]
+
+# ββ 2. Get the most recent failed run ββββββββββββββββββββββββββββββββββββ
+runs = mcp("get_live_flow_runs", {"environmentName": ENV, "flowName": FLOW_ID, "top": 5})
+# [{"name": "08584296068...", "status": "Failed", ...}, ...]
+RUN_ID = next(r["name"] for r in runs if r["status"] == "Failed")
+
+# ββ 3. Get per-action failure breakdown ββββββββββββββββββββββββββββββββββ
+err = mcp("get_live_flow_run_error", {"environmentName": ENV, "flowName": FLOW_ID, "runName": RUN_ID})
+# {"failedActions": [{"actionName": "HTTP_find_AD_User_by_Name", "code": "NotSpecified",...}], ...}
+root_action = err["failedActions"][-1]["actionName"]
+print(f"Root failure: {root_action}")
+
+# ββ 4. Read the definition and inspect the failing action's expression βββ
+defn = mcp("get_live_flow", {"environmentName": ENV, "flowName": FLOW_ID})
+acts = defn["properties"]["definition"]["actions"]
+print("Failing action inputs:", acts[root_action]["inputs"])
+
+# ββ 5. Inspect the prior action's output to find the null ββββββββββββββββ
+out = mcp("get_live_flow_run_action_outputs", {
+ "environmentName": ENV, "flowName": FLOW_ID,
+ "runName": RUN_ID, "actionName": "Compose_Names"
+})
+# Tool returns an array of action objects (see get_live_flow_run_action_outputs docs)
+item = out[0] if out else {}
+nulls = [x for x in item.get("outputs", {}).get("body", []) if x.get("Name") is None]
+print(f"{len(nulls)} records with null Name")
+
+# ββ 6. Apply the fix βββββββββββββββββββββββββββββββββββββββββββββββββββββ
+acts[root_action]["inputs"]["parameters"]["searchName"] = \
+ "@coalesce(item()?['Name'], '')"
+
+conn_refs = defn["properties"]["connectionReferences"]
+result = mcp("update_live_flow", {
+ "environmentName": ENV, "flowName": FLOW_ID,
+ "definition": defn["properties"]["definition"],
+ "connectionReferences": conn_refs
+})
+assert result.get("error") is None, f"Deploy failed: {result['error']}"
+# β οΈ error key is always present β only fail if it is NOT None
+
+# ββ 7. Resubmit and verify βββββββββββββββββββββββββββββββββββββββββββββββ
+mcp("resubmit_live_flow_run", {"environmentName": ENV, "flowName": FLOW_ID, "runName": RUN_ID})
+
+import time; time.sleep(30)
+new_runs = mcp("get_live_flow_runs", {"environmentName": ENV, "flowName": FLOW_ID, "top": 1})
+print(new_runs[0]["status"]) # Succeeded = done
+```
+
+---
+
+## Auth & Connection Notes
+
+| Field | Value |
+|---|---|
+| Auth header | `x-api-key: <jwt-token>` — **not** `Authorization: Bearer` |
+| Token format | Plain JWT — do not strip, alter, or prefix it |
+| Timeout | Use ≥ 120 s for `get_live_flow_run_action_outputs` (large outputs) |
+| Environment name | `Default-<tenant-guid>` (find it via `list_live_environments` or `list_live_flows` response) |
+
+---
+
+## Reference Files
+
+- [MCP-BOOTSTRAP.md](references/MCP-BOOTSTRAP.md) β endpoint, auth, request/response format (read this first)
+- [tool-reference.md](references/tool-reference.md) β response shapes and behavioral notes (parameters are in `tools/list`)
+- [action-types.md](references/action-types.md) β Power Automate action type patterns
+- [connection-references.md](references/connection-references.md) β connector reference guide
+
+---
+
+## More Capabilities
+
+For **diagnosing failing flows** end-to-end — load the `flowstudio-power-automate-debug` skill.
+
+For **building and deploying new flows** — load the `flowstudio-power-automate-build` skill.
diff --git a/skills/flowstudio-power-automate-mcp/references/MCP-BOOTSTRAP.md b/skills/flowstudio-power-automate-mcp/references/MCP-BOOTSTRAP.md
new file mode 100644
index 00000000..7dc71605
--- /dev/null
+++ b/skills/flowstudio-power-automate-mcp/references/MCP-BOOTSTRAP.md
@@ -0,0 +1,53 @@
+# MCP Bootstrap — Quick Reference
+
+Everything an agent needs to start calling the FlowStudio MCP server.
+
+```
+Endpoint: https://mcp.flowstudio.app/mcp
+Protocol: JSON-RPC 2.0 over HTTP POST
+Transport: Streamable HTTP β single POST per request, no SSE, no WebSocket
+Auth: x-api-key header with JWT token (NOT Bearer)
+```
+
+## Required Headers
+
+```
+Content-Type: application/json
+x-api-key:
+User-Agent: FlowStudio-MCP/1.0 β required, or Cloudflare blocks you
+```
+
+## Step 1 β Discover Tools
+
+```json
+POST {"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}
+```
+
+Returns all tools with names, descriptions, and input schemas.
+Free β not counted against plan limits.
+
+## Step 2 β Call a Tool
+
+```json
+POST {"jsonrpc":"2.0","id":1,"method":"tools/call",
+ "params":{"name":"","arguments":{...}}}
+```
+
+## Response Shape
+
+```
+Success β {"result":{"content":[{"type":"text","text":""}]}}
+Error β {"result":{"content":[{"type":"text","text":"{\"error\":{...}}"}]}}
+```
+
+Always parse `result.content[0].text` as JSON to get the actual data.
+
+## Key Tips
+
+- Tool results are JSON strings inside the text field β **double-parse needed**
+- `"error"` field in parsed body: `null` = success, object = failure
+- `environmentName` is required for most tools, but **not** for:
+ `list_live_environments`, `list_live_connections`, `list_store_flows`,
+ `list_store_environments`, `list_store_makers`, `get_store_maker`,
+ `list_store_power_apps`, `list_store_connections`
+- When in doubt, check the `required` array in each tool's schema from `tools/list`
diff --git a/skills/flowstudio-power-automate-mcp/references/action-types.md b/skills/flowstudio-power-automate-mcp/references/action-types.md
new file mode 100644
index 00000000..42507ce7
--- /dev/null
+++ b/skills/flowstudio-power-automate-mcp/references/action-types.md
@@ -0,0 +1,79 @@
+# FlowStudio MCP — Action Types Reference
+
+Compact lookup for recognising action types returned by `get_live_flow`.
+Use this to **read and understand** existing flow definitions.
+
+> For full copy-paste construction patterns, see the `power-automate-build` skill.
+
+---
+
+## How to Read a Flow Definition
+
+Every action has `"type"`, `"runAfter"`, and `"inputs"`. The `runAfter` object
+declares dependencies: `{"Previous": ["Succeeded"]}`. Valid statuses:
+`Succeeded`, `Failed`, `Skipped`, `TimedOut`.
+
+---
+
+## Action Type Quick Reference
+
+| Type | Purpose | Key fields to inspect | Output reference |
+|---|---|---|---|
+| `Compose` | Store/transform a value | `inputs` (any expression) | `outputs('Name')` |
+| `InitializeVariable` | Declare a variable | `inputs.variables[].{name, type, value}` | `variables('name')` |
+| `SetVariable` | Update a variable | `inputs.{name, value}` | `variables('name')` |
+| `IncrementVariable` | Increment a numeric variable | `inputs.{name, value}` | `variables('name')` |
+| `AppendToArrayVariable` | Push to an array variable | `inputs.{name, value}` | `variables('name')` |
+| `If` | Conditional branch | `expression.and/or`, `actions`, `else.actions` | — |
+| `Switch` | Multi-way branch | `expression`, `cases.{case, actions}`, `default` | — |
+| `Foreach` | Loop over array | `foreach`, `actions`, `operationOptions` | `item()` / `items('Name')` |
+| `Until` | Loop until condition | `expression`, `limit.{count, timeout}`, `actions` | — |
+| `Wait` | Delay | `inputs.interval.{count, unit}` | — |
+| `Scope` | Group / try-catch | `actions` (nested action map) | `result('Name')` |
+| `Terminate` | End run | `inputs.{runStatus, runError}` | — |
+| `OpenApiConnection` | Connector call (SP, Outlook, Teams…) | `inputs.host.{apiId, connectionName, operationId}`, `inputs.parameters` | `outputs('Name')?['body/...']` |
+| `OpenApiConnectionWebhook` | Webhook wait (approvals, adaptive cards) | same as above | `body('Name')?['...']` |
+| `Http` | External HTTP call | `inputs.{method, uri, headers, body}` | `outputs('Name')?['body']` |
+| `Response` | Return to HTTP caller | `inputs.{statusCode, headers, body}` | — |
+| `Query` | Filter array | `inputs.{from, where}` | `body('Name')` (filtered array) |
+| `Select` | Reshape/project array | `inputs.{from, select}` | `body('Name')` (projected array) |
+| `Table` | Array → CSV/HTML string | `inputs.{from, format, columns}` | `body('Name')` (string) |
+| `ParseJson` | Parse JSON with schema | `inputs.{content, schema}` | `body('Name')?['field']` |
+| `Expression` | Built-in function (e.g. ConvertTimeZone) | `kind`, `inputs` | `body('Name')` |
+
+---
+
+## Connector Identification
+
+When you see `type: OpenApiConnection`, identify the connector from `host.apiId`:
+
+| apiId suffix | Connector |
+|---|---|
+| `shared_sharepointonline` | SharePoint |
+| `shared_office365` | Outlook / Office 365 |
+| `shared_teams` | Microsoft Teams |
+| `shared_approvals` | Approvals |
+| `shared_office365users` | Office 365 Users |
+| `shared_flowmanagement` | Flow Management |
+
+The `operationId` tells you the specific operation (e.g. `GetItems`, `SendEmailV2`,
+`PostMessageToConversation`). The `connectionName` maps to a GUID in
+`properties.connectionReferences`.
+
+---
+
+## Common Expressions (Reading Cheat Sheet)
+
+| Expression | Meaning |
+|---|---|
+| `@outputs('X')?['body/value']` | Array result from connector action X |
+| `@body('X')` | Direct body of action X (Query, Select, ParseJson) |
+| `@item()?['Field']` | Current loop item's field |
+| `@triggerBody()?['Field']` | Trigger payload field |
+| `@variables('name')` | Variable value |
+| `@coalesce(a, b)` | First non-null of a, b |
+| `@first(array)` | First element (null if empty) |
+| `@length(array)` | Array count |
+| `@empty(value)` | True if null/empty string/empty array |
+| `@union(a, b)` | Merge arrays — **first wins** on duplicates |
+| `@result('Scope')` | Array of action outcomes inside a Scope |
diff --git a/skills/flowstudio-power-automate-mcp/references/connection-references.md b/skills/flowstudio-power-automate-mcp/references/connection-references.md
new file mode 100644
index 00000000..08e83984
--- /dev/null
+++ b/skills/flowstudio-power-automate-mcp/references/connection-references.md
@@ -0,0 +1,115 @@
+# FlowStudio MCP — Connection References
+
+Connection references wire a flow's connector actions to real authenticated
+connections in the Power Platform. They are required whenever you call
+`update_live_flow` with a definition that uses connector actions.
+
+---
+
+## Structure in a Flow Definition
+
+```json
+{
+ "properties": {
+ "definition": { ... },
+ "connectionReferences": {
+ "shared_sharepointonline": {
+ "connectionName": "shared-sharepointonl-62599557c-1f33-4aec-b4c0-a6e4afcae3be",
+ "id": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline",
+ "displayName": "SharePoint"
+ },
+ "shared_office365": {
+ "connectionName": "shared-office365-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+ "id": "/providers/Microsoft.PowerApps/apis/shared_office365",
+ "displayName": "Office 365 Outlook"
+ }
+ }
+ }
+}
+```
+
+Keys are **logical reference names** (e.g. `shared_sharepointonline`).
+These match the `connectionName` field inside each action's `host` block.
+
+---
+
+## Finding Connection GUIDs
+
+Call `get_live_flow` on **any existing flow** that uses the same connection
+and copy the `connectionReferences` block. The GUID after the connector prefix is
+the connection instance owned by the authenticating user.
+
+```python
+flow = mcp("get_live_flow", environmentName=ENV, flowName=EXISTING_FLOW_ID)
+conn_refs = flow["properties"]["connectionReferences"]
+# conn_refs["shared_sharepointonline"]["connectionName"]
+# → "shared-sharepointonl-62599557c-1f33-4aec-b4c0-a6e4afcae3be"
+```
+
+> ⚠️ Connection references are **user-scoped**. If a connection is owned
+> by another account, `update_live_flow` will return 403
+> `ConnectionAuthorizationFailed`. You must use a connection belonging to
+> the account whose token is in the `x-api-key` header.
+
+---
+
+## Passing `connectionReferences` to `update_live_flow`
+
+```python
+result = mcp("update_live_flow",
+ environmentName=ENV,
+ flowName=FLOW_ID,
+ definition=modified_definition,
+ connectionReferences={
+ "shared_sharepointonline": {
+ "connectionName": "shared-sharepointonl-62599557c-1f33-4aec-b4c0-a6e4afcae3be",
+ "id": "/providers/Microsoft.PowerApps/apis/shared_sharepointonline"
+ }
+ }
+)
+```
+
+Only include connections that the definition actually uses.
+
+---
+
+## Common Connector API IDs
+
+| Service | API ID |
+|---|---|
+| SharePoint Online | `/providers/Microsoft.PowerApps/apis/shared_sharepointonline` |
+| Office 365 Outlook | `/providers/Microsoft.PowerApps/apis/shared_office365` |
+| Microsoft Teams | `/providers/Microsoft.PowerApps/apis/shared_teams` |
+| OneDrive for Business | `/providers/Microsoft.PowerApps/apis/shared_onedriveforbusiness` |
+| Azure AD | `/providers/Microsoft.PowerApps/apis/shared_azuread` |
+| HTTP with Azure AD | `/providers/Microsoft.PowerApps/apis/shared_webcontents` |
+| SQL Server | `/providers/Microsoft.PowerApps/apis/shared_sql` |
+| Dataverse | `/providers/Microsoft.PowerApps/apis/shared_commondataserviceforapps` |
+| Azure Blob Storage | `/providers/Microsoft.PowerApps/apis/shared_azureblob` |
+| Approvals | `/providers/Microsoft.PowerApps/apis/shared_approvals` |
+| Office 365 Users | `/providers/Microsoft.PowerApps/apis/shared_office365users` |
+| Flow Management | `/providers/Microsoft.PowerApps/apis/shared_flowmanagement` |
+
+---
+
+## Teams Adaptive Card Dual-Connection Requirement
+
+Flows that send adaptive cards **and** post follow-up messages require two
+separate Teams connections:
+
+```json
+"connectionReferences": {
+ "shared_teams": {
+ "connectionName": "shared-teams-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+ "id": "/providers/Microsoft.PowerApps/apis/shared_teams"
+ },
+ "shared_teams_1": {
+ "connectionName": "shared-teams-yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy",
+ "id": "/providers/Microsoft.PowerApps/apis/shared_teams"
+ }
+}
+```
+
+Both can point to the **same underlying Teams account** but must be registered
+as two distinct connection references. The webhook (`OpenApiConnectionWebhook`)
+uses `shared_teams` and subsequent message actions use `shared_teams_1`.
diff --git a/skills/flowstudio-power-automate-mcp/references/tool-reference.md b/skills/flowstudio-power-automate-mcp/references/tool-reference.md
new file mode 100644
index 00000000..b447a9c0
--- /dev/null
+++ b/skills/flowstudio-power-automate-mcp/references/tool-reference.md
@@ -0,0 +1,445 @@
+# FlowStudio MCP — Tool Response Catalog
+
+Response shapes and behavioral notes for the FlowStudio Power Automate MCP server.
+
+> **For tool names and parameters**: Always call `tools/list` on the server.
+> It returns the authoritative, up-to-date schema for every tool.
+> This document covers what `tools/list` does NOT tell you: **response shapes**
+> and **non-obvious behaviors** discovered through real usage.
+
+---
+
+## Source of Truth
+
+| Priority | Source | Covers |
+|----------|--------|--------|
+| 1 | **Real API response** | Always trust what the server actually returns |
+| 2 | **`tools/list`** | Tool names, parameter names, types, required flags |
+| 3 | **This document** | Response shapes, behavioral notes, gotchas |
+
+> If this document disagrees with `tools/list` or real API behavior,
+> the API wins. Update this document accordingly.
+
+---
+
+## Environment & Tenant Discovery
+
+### `list_live_environments`
+
+Response: direct array of environments.
+```json
+[
+ {
+ "id": "Default-26e65220-5561-46ef-9783-ce5f20489241",
+ "displayName": "FlowStudio (default)",
+ "sku": "Production",
+ "location": "australia",
+ "state": "Enabled",
+ "isDefault": true,
+ "isAdmin": true,
+ "isMember": true,
+ "createdTime": "2023-08-18T00:41:05Z"
+ }
+]
+```
+
+> Use the `id` value as `environmentName` in all other tools.
+
+### `list_store_environments`
+
+Same shape as `list_live_environments` but read from cache (faster).
+
+---
+
+## Connection Discovery
+
+### `list_live_connections`
+
+Response: wrapper object with `connections` array.
+```json
+{
+ "connections": [
+ {
+ "id": "shared-office365-9f9d2c8e-55f1-49c9-9f9c-1c45d1fbbdce",
+ "displayName": "user@contoso.com",
+ "connectorName": "shared_office365",
+ "createdBy": "User Name",
+ "statuses": [{"status": "Connected"}],
+ "createdTime": "2024-03-12T21:23:55.206815Z"
+ }
+ ],
+ "totalCount": 56,
+ "error": null
+}
+```
+
+> **Key field**: `id` is the `connectionName` value used in `connectionReferences`.
+>
+> **Key field**: `connectorName` maps to apiId:
+> `"/providers/Microsoft.PowerApps/apis/" + connectorName`
+>
+> Filter by status: `statuses[0].status == "Connected"`.
+>
+> **Note**: `tools/list` marks `environmentName` as optional, but the server
+> returns `MissingEnvironmentFilter` (HTTP 400) if you omit it. Always pass
+> `environmentName`.
+
+### `list_store_connections`
+
+Same connection data from cache.
+
+---
+
+## Flow Discovery & Listing
+
+### `list_live_flows`
+
+Response: wrapper object with `flows` array.
+```json
+{
+ "mode": "owner",
+ "flows": [
+ {
+ "id": "0757041a-8ef2-cf74-ef06-06881916f371",
+ "displayName": "My Flow",
+ "state": "Started",
+ "triggerType": "Request",
+ "triggerKind": "Http",
+ "createdTime": "2023-08-18T01:18:17Z",
+ "lastModifiedTime": "2023-08-18T12:47:42Z",
+ "owners": "",
+ "definitionAvailable": true
+ }
+ ],
+ "totalCount": 100,
+ "error": null
+}
+```
+
+> Access via `result["flows"]`. `id` is a plain UUID --- use directly as `flowName`.
+>
+> `mode` indicates the access scope used (`"owner"` or `"admin"`).
+
+### `list_store_flows`
+
+Response: **direct array** (no wrapper).
+```json
+[
+ {
+ "id": "3991358a-f603-e49d-b1ed-a9e4f72e2dcb.0757041a-8ef2-cf74-ef06-06881916f371",
+ "displayName": "Admin | Sync Template v3 (Solutions)",
+ "state": "Started",
+ "triggerType": "OpenApiConnectionWebhook",
+ "environmentName": "3991358a-f603-e49d-b1ed-a9e4f72e2dcb",
+ "runPeriodTotal": 100,
+ "createdTime": "2023-08-18T01:18:17Z",
+ "lastModifiedTime": "2023-08-18T12:47:42Z"
+ }
+]
+```
+
+> **`id` format**: `envId.flowId` --- split on the first `.` to extract the flow UUID:
+> `flow_id = item["id"].split(".", 1)[1]`
+
+### `get_store_flow`
+
+Response: single flow metadata from cache (selected fields).
+```json
+{
+ "id": "envId.flowId",
+ "displayName": "My Flow",
+ "state": "Started",
+ "triggerType": "Recurrence",
+ "runPeriodTotal": 100,
+ "runPeriodFailRate": 0.1,
+ "runPeriodSuccessRate": 0.9,
+ "runPeriodFails": 10,
+ "runPeriodSuccess": 90,
+ "runPeriodDurationAverage": 29410.8,
+ "runPeriodDurationMax": 158900.0,
+ "runError": "{\"code\": \"EACCES\", ...}",
+ "description": "Flow description",
+ "tier": "Premium",
+ "complexity": "{...}",
+ "actions": 42,
+ "connections": ["sharepointonline", "office365"],
+ "owners": ["user@contoso.com"],
+ "createdBy": "user@contoso.com"
+}
+```
+
+> `runPeriodDurationAverage` / `runPeriodDurationMax` are in **milliseconds** (divide by 1000).
+> `runError` is a **JSON string** --- parse with `json.loads()`.
+
+---
+
+## Flow Definition (Live API)
+
+### `get_live_flow`
+
+Response: full flow definition from PA API.
+```json
+{
+    "name": "<flow-guid>",
+ "properties": {
+ "displayName": "My Flow",
+ "state": "Started",
+ "definition": {
+ "triggers": { "..." },
+ "actions": { "..." },
+ "parameters": { "..." }
+ },
+ "connectionReferences": { "..." }
+ }
+}
+```
+
+### `update_live_flow`
+
+**Create mode**: Omit `flowName` --- creates a new flow. `definition` and `displayName` required.
+
+**Update mode**: Provide `flowName` --- PATCHes existing flow.
+
+Response:
+```json
+{
+ "created": false,
+ "flowKey": "envId.flowId",
+ "updated": ["definition", "connectionReferences"],
+ "displayName": "My Flow",
+ "state": "Started",
+ "definition": { "...full definition..." },
+ "error": null
+}
+```
+
+> `error` is **always present** but may be `null`. Check `result.get("error") is not None`.
+>
+> On create: `created` is the new flow GUID (string). On update: `created` is `false`.
+>
+> `description` is **always required** (create and update).
+
+### `add_live_flow_to_solution`
+
+Migrates a non-solution flow into a solution. Returns error if already in a solution.
+
+---
+
+## Run History & Monitoring
+
+### `get_live_flow_runs`
+
+Response: direct array of runs (newest first).
+```json
+[{
+  "name": "<run-id>",
+ "status": "Succeeded|Failed|Running|Cancelled",
+ "startTime": "2026-02-25T06:13:38Z",
+ "endTime": "2026-02-25T06:14:02Z",
+ "triggerName": "Recurrence",
+ "error": null
+}]
+```
+
+> `top` defaults to **30** and auto-paginates for higher values. Set `top: 300`
+> for 24-hour coverage on flows running every 5 minutes.
+>
+> Run ID field is **`name`** (not `runName`). Use this value as the `runName`
+> parameter in other tools.
+
+### `get_live_flow_run_error`
+
+Response: structured error breakdown for a failed run.
+```json
+{
+ "runName": "08584296068667933411438594643CU15",
+ "failedActions": [
+ {
+ "actionName": "Apply_to_each_prepare_workers",
+ "status": "Failed",
+ "error": {"code": "ActionFailed", "message": "An action failed."},
+ "code": "ActionFailed",
+ "startTime": "2026-02-25T06:13:52Z",
+ "endTime": "2026-02-25T06:15:24Z"
+ },
+ {
+ "actionName": "HTTP_find_AD_User_by_Name",
+ "status": "Failed",
+ "code": "NotSpecified",
+ "startTime": "2026-02-25T06:14:01Z",
+ "endTime": "2026-02-25T06:14:05Z"
+ }
+ ],
+ "allActions": [
+ {"actionName": "Apply_to_each", "status": "Skipped"},
+ {"actionName": "Compose_WeekEnd", "status": "Succeeded"},
+ {"actionName": "HTTP_find_AD_User_by_Name", "status": "Failed"}
+ ]
+}
+```
+
+> `failedActions` is ordered outer-to-inner --- the **last entry is the root cause**.
+> Use `failedActions[-1]["actionName"]` as the starting point for diagnosis.
+
+### `get_live_flow_run_action_outputs`
+
+Response: array of action detail objects.
+```json
+[
+ {
+ "actionName": "Compose_WeekEnd_now",
+ "status": "Succeeded",
+ "startTime": "2026-02-25T06:13:52Z",
+ "endTime": "2026-02-25T06:13:52Z",
+ "error": null,
+ "inputs": "Mon, 25 Feb 2026 06:13:52 GMT",
+ "outputs": "Mon, 25 Feb 2026 06:13:52 GMT"
+ }
+]
+```
+
+> **`actionName` is optional**: omit it to return ALL actions in the run;
+> provide it to return a single-element array for that action only.
+>
+> Outputs can be very large (50 MB+) for bulk-data actions. Use 120s+ timeout.
+
+---
+
+## Run Control
+
+### `resubmit_live_flow_run`
+
+Response: `{ flowKey, resubmitted: true, runName, triggerName }`
+
+### `cancel_live_flow_run`
+
+Cancels a `Running` flow run.
+
+> Do NOT cancel runs waiting for an adaptive card response --- status `Running`
+> is normal while a Teams card is awaiting user input.
+
+---
+
+## HTTP Trigger Tools
+
+### `get_live_flow_http_schema`
+
+Response keys:
+```
+flowKey - Flow GUID
+displayName - Flow display name
+triggerName - Trigger action name (e.g. "manual")
+triggerType - Trigger type (e.g. "Request")
+triggerKind - Trigger kind (e.g. "Http")
+requestMethod - HTTP method (e.g. "POST")
+relativePath - Relative path configured on the trigger (if any)
+requestSchema - JSON schema the trigger expects as POST body
+requestHeaders - Headers the trigger expects
+responseSchemas - Array of JSON schemas defined on Response action(s)
+responseSchemaCount - Number of Response actions that define output schemas
+```
+
+> The request body schema is in `requestSchema` (not `triggerSchema`).
+
+### `get_live_flow_trigger_url`
+
+Returns the signed callback URL for HTTP-triggered flows. Response includes
+`flowKey`, `triggerName`, `triggerType`, `triggerKind`, `triggerMethod`, `triggerUrl`.
+
+### `trigger_live_flow`
+
+Response keys: `flowKey`, `triggerName`, `triggerUrl`, `requiresAadAuth`, `authType`,
+`responseStatus`, `responseBody`.
+
+> **Only works for `Request` (HTTP) triggers.** Returns an error for Recurrence
+> and other trigger types: `"only HTTP Request triggers can be invoked via this tool"`.
+>
+> `responseStatus` + `responseBody` contain the flow's Response action output.
+> AAD-authenticated triggers are handled automatically.
+
+---
+
+## Flow State Management
+
+### `set_store_flow_state`
+
+Start or stop a flow. Pass `state: "Started"` or `state: "Stopped"`.
+
+---
+
+## Store Tools --- FlowStudio for Teams Only
+
+### `get_store_flow_summary`
+
+Response: aggregated run statistics.
+```json
+{
+ "totalRuns": 100,
+ "failRuns": 10,
+ "failRate": 0.1,
+ "averageDurationSeconds": 29.4,
+ "maxDurationSeconds": 158.9,
+ "firstFailRunRemediation": ""
+}
+```
+
+### `get_store_flow_runs`
+
+Cached run history for the last N days with duration and remediation hints.
+
+### `get_store_flow_errors`
+
+Cached failed-only runs with failed action names and remediation hints.
+
+### `get_store_flow_trigger_url`
+
+Trigger URL from cache (instant, no PA API call).
+
+### `update_store_flow`
+
+Update governance metadata (description, tags, monitor flag, notification rules, business impact).
+
+### `list_store_makers` / `get_store_maker`
+
+Maker (citizen developer) discovery and detail.
+
+### `list_store_power_apps`
+
+List all Power Apps canvas apps from the cache.
+
+---
+
+## Behavioral Notes
+
+Non-obvious behaviors discovered through real API usage. These are things
+`tools/list` cannot tell you.
+
+### `get_live_flow_run_action_outputs`
+- **`actionName` is optional**: omit to get all actions, provide to get one.
+ This changes the response from N elements to 1 element (still an array).
+- Outputs can be 50 MB+ for bulk-data actions --- always use 120s+ timeout.
+
+### `update_live_flow`
+- `description` is **always required** (create and update modes).
+- `error` key is **always present** in response --- `null` means success.
+ Do NOT check `if "error" in result`; check `result.get("error") is not None`.
+- On create, `created` = new flow GUID (string). On update, `created` = `false`.
+
+### `trigger_live_flow`
+- **Only works for HTTP Request triggers.** Returns error for Recurrence, connector,
+ and other trigger types.
+- AAD-authenticated triggers are handled automatically (impersonated Bearer token).
+
+### `get_live_flow_runs`
+- `top` defaults to **30** with automatic pagination for higher values.
+- Run ID field is `name`, not `runName`. Use this value as `runName` in other tools.
+- Runs are returned newest-first.
+
+### Teams `PostMessageToConversation` (via `update_live_flow`)
+- **"Chat with Flow bot"**: `body/recipient` = `"user@domain.com;"` (string with trailing semicolon).
+- **"Channel"**: `body/recipient` = `{"groupId": "...", "channelId": "..."}` (object).
+- `poster`: `"Flow bot"` for Workflows bot identity, `"User"` for user identity.
+
+### `list_live_connections`
+- `id` is the value you need for `connectionName` in `connectionReferences`.
+- `connectorName` maps to apiId: `"/providers/Microsoft.PowerApps/apis/" + connectorName`.
diff --git a/skills/github-copilot-starter/SKILL.md b/skills/github-copilot-starter/SKILL.md
index ba196b0f..f3c687eb 100644
--- a/skills/github-copilot-starter/SKILL.md
+++ b/skills/github-copilot-starter/SKILL.md
@@ -12,51 +12,82 @@ Ask the user for the following information if not provided:
1. **Primary Language/Framework**: (e.g., JavaScript/React, Python/Django, Java/Spring Boot, etc.)
2. **Project Type**: (e.g., web app, API, mobile app, desktop app, library, etc.)
3. **Additional Technologies**: (e.g., database, cloud provider, testing frameworks, etc.)
-4. **Team Size**: (solo, small team, enterprise)
-5. **Development Style**: (strict standards, flexible, specific patterns)
+4. **Development Style**: (strict standards, flexible, specific patterns)
+5. **GitHub Actions / Coding Agent**: Does the project use GitHub Actions? (yes/no — determines whether to generate `copilot-setup-steps.yml`)
## Configuration Files to Create
Based on the provided stack, create the following files in the appropriate directories:
### 1. `.github/copilot-instructions.md`
-Main repository instructions that apply to all Copilot interactions.
+Main repository instructions that apply to all Copilot interactions. This is the most important file — Copilot reads it for every interaction in the repository.
+
+Use this structure:
+```md
+# {Project Name} — Copilot Instructions
+
+## Project Overview
+Brief description of what this project does and its primary purpose.
+
+## Tech Stack
+List the primary language, frameworks, and key dependencies.
+
+## Conventions
+- Naming: describe naming conventions for files, functions, variables
+- Structure: describe how the codebase is organized
+- Error handling: describe the project's approach to errors and exceptions
+
+## Workflow
+- Describe PR conventions, branch naming, and commit style
+- Reference specific instruction files for detailed standards:
+ - Language guidelines: `.github/instructions/{language}.instructions.md`
+ - Testing: `.github/instructions/testing.instructions.md`
+ - Security: `.github/instructions/security.instructions.md`
+ - Documentation: `.github/instructions/documentation.instructions.md`
+ - Performance: `.github/instructions/performance.instructions.md`
+ - Code review: `.github/instructions/code-review.instructions.md`
+```
### 2. `.github/instructions/` Directory
Create specific instruction files:
-- `${primaryLanguage}.instructions.md` - Language-specific guidelines
+- `{primaryLanguage}.instructions.md` - Language-specific guidelines
- `testing.instructions.md` - Testing standards and practices
- `documentation.instructions.md` - Documentation requirements
- `security.instructions.md` - Security best practices
- `performance.instructions.md` - Performance optimization guidelines
- `code-review.instructions.md` - Code review standards and GitHub review guidelines
-### 3. `.github/prompts/` Directory
-Create reusable prompt files:
-- `setup-component.prompt.md` - Component/module creation
-- `write-tests.prompt.md` - Test generation
-- `code-review.prompt.md` - Code review assistance
-- `refactor-code.prompt.md` - Code refactoring
-- `generate-docs.prompt.md` - Documentation generation
-- `debug-issue.prompt.md` - Debugging assistance
+### 3. `.github/skills/` Directory
+Create reusable skills as self-contained folders:
+- `setup-component/SKILL.md` - Component/module creation
+- `write-tests/SKILL.md` - Test generation
+- `code-review/SKILL.md` - Code review assistance
+- `refactor-code/SKILL.md` - Code refactoring
+- `generate-docs/SKILL.md` - Documentation generation
+- `debug-issue/SKILL.md` - Debugging assistance
### 4. `.github/agents/` Directory
-Create specialized chat modes:
-- `architect.agent.md` - Architecture planning mode
-- `reviewer.agent.md` - Code review mode
-- `debugger.agent.md` - Debugging mode
+Always create these 4 agents:
+- `software-engineer.agent.md`
+- `architect.agent.md`
+- `reviewer.agent.md`
+- `debugger.agent.md`
-**Chat Mode Attribution**: When using content from awesome-copilot chatmodes, add attribution comments:
+For each, fetch the most specific match from awesome-copilot agents. If none exists, use the generic template.
+
+**Agent Attribution**: When using content from awesome-copilot agents, add attribution comments:
```markdown
```
-### 5. `.github/workflows/` Directory
+### 5. `.github/workflows/` Directory (only if user uses GitHub Actions)
+Skip this section entirely if the user answered "no" to GitHub Actions.
+
Create Coding Agent workflow file:
- `copilot-setup-steps.yml` - GitHub Actions workflow for Coding Agent environment setup
**CRITICAL**: The workflow MUST follow this exact structure:
-- Job name MUST be `copilot-setup-steps`
+- Job name MUST be `copilot-setup-steps`
- Include proper triggers (workflow_dispatch, push, pull_request on the workflow file)
- Set appropriate permissions (minimum required)
- Customize steps based on the technology stack provided
@@ -66,9 +97,10 @@ Create Coding Agent workflow file:
For each file, follow these principles:
**MANDATORY FIRST STEP**: Always use the fetch tool to research existing patterns before creating any content:
-1. **Fetch from awesome-copilot collections**: https://github.com/github/awesome-copilot/blob/main/docs/README.collections.md
-2. **Fetch specific instruction files**: https://raw.githubusercontent.com/github/awesome-copilot/main/instructions/[relevant-file].instructions.md
-3. **Check for existing patterns** that match the technology stack
+1. **Fetch specific instruction from awesome-copilot docs**: https://github.com/github/awesome-copilot/blob/main/docs/README.instructions.md
+2. **Fetch specific agents from awesome-copilot docs**: https://github.com/github/awesome-copilot/blob/main/docs/README.agents.md
+3. **Fetch specific skills from awesome-copilot docs**: https://github.com/github/awesome-copilot/blob/main/docs/README.skills.md
+4. **Check for existing patterns** that match the technology stack
**Primary Approach**: Reference and adapt existing instructions from awesome-copilot repository:
- **Use existing content** when available - don't reinvent the wheel
@@ -77,12 +109,12 @@ For each file, follow these principles:
- **ALWAYS add attribution comments** when using awesome-copilot content
**Attribution Format**: When using content from awesome-copilot, add this comment at the top of the file:
-```markdown
+```md
```
**Examples:**
-```markdown
+```md
---
applyTo: "**/*.jsx,**/*.tsx"
@@ -92,7 +124,7 @@ description: "React development best practices"
...
```
-```markdown
+```md
---
@@ -128,20 +160,19 @@ description: "Java Spring Boot development standards"
**Research Strategy with fetch tool:**
1. **Check awesome-copilot first** - Always start here for ALL file types
2. **Look for exact tech stack matches** (e.g., React, Node.js, Spring Boot)
-3. **Look for general matches** (e.g., frontend chatmodes, testing prompts, review modes)
-4. **Check awesome-copilot collections** for curated sets of related files
-5. **Adapt community examples** to project needs
+3. **Look for general matches** (e.g., frontend agents, testing skills, review workflows)
+4. **Check the docs and relevant directories directly** for related files
+5. **Prefer repo-native examples** over inventing new formats
6. **Only create custom content** if nothing relevant exists
**Fetch these awesome-copilot directories:**
- **Instructions**: https://github.com/github/awesome-copilot/tree/main/instructions
-- **Prompts**: https://github.com/github/awesome-copilot/tree/main/prompts
-- **Chat Modes**: https://github.com/github/awesome-copilot/tree/main/chatmodes
-- **Collections**: https://github.com/github/awesome-copilot/blob/main/docs/README.collections.md
+- **Agents**: https://github.com/github/awesome-copilot/tree/main/agents
+- **Skills**: https://github.com/github/awesome-copilot/tree/main/skills
-**Awesome-Copilot Collections to Check:**
+**Awesome-Copilot Areas to Check:**
- **Frontend Web Development**: React, Angular, Vue, TypeScript, CSS frameworks
-- **C# .NET Development**: Testing, documentation, and best practices
+- **C# .NET Development**: Testing, documentation, and best practices
- **Java Development**: Spring Boot, Quarkus, testing, documentation
- **Database Development**: PostgreSQL, SQL Server, and general database best practices
- **Azure Development**: Infrastructure as Code, serverless functions
@@ -162,77 +193,73 @@ project-root/
│   │   ├── security.instructions.md
│   │   ├── performance.instructions.md
│   │   └── code-review.instructions.md
-│   ├── prompts/
-│   │   ├── setup-component.prompt.md
-│   │   ├── write-tests.prompt.md
-│   │   ├── code-review.prompt.md
-│   │   ├── refactor-code.prompt.md
-│   │   ├── generate-docs.prompt.md
-│   │   └── debug-issue.prompt.md
+│   ├── skills/
+│   │   ├── setup-component/
+│   │   │   └── SKILL.md
+│   │   ├── write-tests/
+│   │   │   └── SKILL.md
+│   │   ├── code-review/
+│   │   │   └── SKILL.md
+│   │   ├── refactor-code/
+│   │   │   └── SKILL.md
+│   │   ├── generate-docs/
+│   │   │   └── SKILL.md
+│   │   └── debug-issue/
+│   │       └── SKILL.md
│   ├── agents/
+│   │   ├── software-engineer.agent.md
│   │   ├── architect.agent.md
│   │   ├── reviewer.agent.md
│   │   └── debugger.agent.md
-│   └── workflows/
+│   └── workflows/ # only if GitHub Actions is used
│       └── copilot-setup-steps.yml
```
## YAML Frontmatter Template
-Use this frontmatter structure for all files:
+Use this structure for all files:
**Instructions (.instructions.md):**
-```yaml
+```md
---
-applyTo: "**/*.ts,**/*.tsx"
+applyTo: "**/*.{lang-ext}"
+description: "Development standards for {Language}"
---
-# Project coding standards for TypeScript and React
+# {Language} coding standards
-Apply the [general coding guidelines](./general-coding.instructions.md) to all code.
+Apply the repository-wide guidance from `../copilot-instructions.md` to all code.
-## TypeScript Guidelines
-- Use TypeScript for all new code
-- Follow functional programming principles where possible
-- Use interfaces for data structures and type definitions
-- Prefer immutable data (const, readonly)
-- Use optional chaining (?.) and nullish coalescing (??) operators
-
-## React Guidelines
-- Use functional components with hooks
-- Follow the React hooks rules (no conditional hooks)
-- Use React.FC type for components with children
-- Keep components small and focused
-- Use CSS modules for component styling
+## General Guidelines
+- Follow the project's established conventions and patterns
+- Prefer clear, readable code over clever abstractions
+- Use the language's idiomatic style and recommended practices
+- Keep modules focused and appropriately sized
+
```
-**Prompts (.prompt.md):**
-```yaml
+**Skills (SKILL.md):**
+```md
---
-agent: 'agent'
-model: Claude Sonnet 4
-tools: ['githubRepo', 'codebase']
-description: 'Generate a new React form component'
+name: {skill-name}
+description: {Brief description of what this skill does}
---
-Your goal is to generate a new React form component based on the templates in #githubRepo contoso/react-templates.
-Ask for the form name and fields if not provided.
+# {Skill Name}
-Requirements for the form:
-* Use form design system components: [design-system/Form.md](../docs/design-system/Form.md)
-* Use `react-hook-form` for form state management:
-* Always define TypeScript types for your form data
-* Prefer *uncontrolled* components using register
-* Use `defaultValues` to prevent unnecessary rerenders
-* Use `yup` for validation:
-* Create reusable validation schemas in separate files
-* Use TypeScript types to ensure type safety
-* Customize UX-friendly validation rules
+{One sentence describing what this skill does. Always follow the repository's established patterns.}
+Ask for {required inputs} if not provided.
+
+## Requirements
+- Use the existing design system and repository conventions
+- Follow the project's established patterns and style
+- Adapt to the specific technology choices of this stack
+- Reuse existing validation and documentation patterns
```
-**Chat Modes (.agent.md):**
-```yaml
+**Agents (.agent.md):**
+```md
---
description: Generate an implementation plan for new features or refactoring existing code.
tools: ['codebase', 'web/fetch', 'findTestFiles', 'githubRepo', 'search', 'usages']
@@ -248,43 +275,48 @@ The plan consists of a Markdown document that describes the implementation plan,
* Requirements: A list of requirements for the feature or refactoring task.
* Implementation Steps: A detailed list of steps to implement the feature or refactoring task.
* Testing: A list of tests that need to be implemented to verify the feature or refactoring task.
-
```
## Execution Steps
-1. **Analyze the provided technology stack**
-2. **Create the directory structure**
-3. **Generate main copilot-instructions.md with project-wide standards**
-4. **Create language-specific instruction files using awesome-copilot references**
-5. **Generate reusable prompts for common development tasks**
-6. **Set up specialized chat modes for different development scenarios**
-7. **Create the GitHub Actions workflow for Coding Agent** (`copilot-setup-steps.yml`)
-8. **Validate all files follow proper formatting and include necessary frontmatter**
+1. **Gather project information** - Ask the user for technology stack, project type, and development style if not provided
+2. **Research awesome-copilot patterns**:
+ - Use the fetch tool to explore awesome-copilot directories
+ - Check instructions: https://github.com/github/awesome-copilot/tree/main/instructions
+ - Check agents: https://github.com/github/awesome-copilot/tree/main/agents (especially for matching expert agents)
+ - Check skills: https://github.com/github/awesome-copilot/tree/main/skills
+ - Document all sources for attribution comments
+3. **Create the directory structure**
+4. **Generate main copilot-instructions.md** with project-wide standards
+5. **Create language-specific instruction files** using awesome-copilot references with attribution
+6. **Generate reusable skills** tailored to project needs
+7. **Set up specialized agents**, fetching from awesome-copilot where applicable (especially for expert engineer agents matching the tech stack)
+8. **Create the GitHub Actions workflow for Coding Agent** (`copilot-setup-steps.yml`) — skip if user does not use GitHub Actions
+9. **Validate** all files follow proper formatting and include necessary frontmatter
## Post-Setup Instructions
After creating all files, provide the user with:
1. **VS Code setup instructions** - How to enable and configure the files
-2. **Usage examples** - How to use each prompt and chat mode
+2. **Usage examples** - How to use each skill and agent
3. **Customization tips** - How to modify files for their specific needs
4. **Testing recommendations** - How to verify the setup works correctly
## Quality Checklist
Before completing, verify:
-- [ ] All files have proper YAML frontmatter
+- [ ] All authored Copilot markdown files have proper YAML frontmatter where required
- [ ] Language-specific best practices are included
- [ ] Files reference each other appropriately using Markdown links
-- [ ] Prompts include relevant tools and variables
+- [ ] Skills and agents include relevant descriptions; include MCP/tool-related metadata only when the target Copilot environment actually supports or requires it
- [ ] Instructions are comprehensive but not overwhelming
- [ ] Security and performance considerations are addressed
- [ ] Testing guidelines are included
- [ ] Documentation standards are clear
- [ ] Code review standards are defined
-## Workflow Template Structure
+## Workflow Template Structure (only if GitHub Actions is used)
The `copilot-setup-steps.yml` workflow MUST follow this exact format and KEEP IT SIMPLE:
diff --git a/skills/github-issues/SKILL.md b/skills/github-issues/SKILL.md
index 48b41f39..4619bacf 100644
--- a/skills/github-issues/SKILL.md
+++ b/skills/github-issues/SKILL.md
@@ -1,69 +1,87 @@
---
name: github-issues
-description: 'Create, update, and manage GitHub issues using MCP tools. Use this skill when users want to create bug reports, feature requests, or task issues, update existing issues, add labels/assignees/milestones, set issue fields (dates, priority, custom fields), set issue types, or manage issue workflows. Triggers on requests like "create an issue", "file a bug", "request a feature", "update issue X", "set the priority", "set the start date", or any GitHub issue management task.'
+description: 'Create, update, and manage GitHub issues using MCP tools. Use this skill when users want to create bug reports, feature requests, or task issues, update existing issues, add labels/assignees/milestones, set issue fields (dates, priority, custom fields), set issue types, manage issue workflows, link issues, add dependencies, or track blocked-by/blocking relationships. Triggers on requests like "create an issue", "file a bug", "request a feature", "update issue X", "set the priority", "set the start date", "link issues", "add dependency", "blocked by", "blocking", or any GitHub issue management task.'
---
# GitHub Issues
Manage GitHub issues using the `@modelcontextprotocol/server-github` MCP server.
-## Available MCP Tools
+## Available Tools
+
+### MCP Tools (read operations)
| Tool | Purpose |
|------|---------|
-| `mcp__github__create_issue` | Create new issues |
-| `mcp__github__update_issue` | Update existing issues |
-| `mcp__github__get_issue` | Fetch issue details |
-| `mcp__github__search_issues` | Search issues |
-| `mcp__github__add_issue_comment` | Add comments |
-| `mcp__github__list_issues` | List repository issues |
-| `mcp__github__list_issue_types` | List available issue types for an organization |
-| `mcp__github__issue_read` | Read issue details, sub-issues, comments, labels |
+| `mcp__github__issue_read` | Read issue details, sub-issues, comments, labels (methods: get, get_comments, get_sub_issues, get_labels) |
+| `mcp__github__list_issues` | List and filter repository issues by state, labels, date |
+| `mcp__github__search_issues` | Search issues across repos using GitHub search syntax |
| `mcp__github__projects_list` | List projects, project fields, project items, status updates |
| `mcp__github__projects_get` | Get details of a project, field, item, or status update |
| `mcp__github__projects_write` | Add/update/delete project items, create status updates |
+### CLI / REST API (write operations)
+
+The MCP server does not currently support creating, updating, or commenting on issues. Use `gh api` for these operations.
+
+| Operation | Command |
+|-----------|---------|
+| Create issue | `gh api repos/{owner}/{repo}/issues -X POST -f title=... -f body=...` |
+| Update issue | `gh api repos/{owner}/{repo}/issues/{number} -X PATCH -f title=... -f state=...` |
+| Add comment | `gh api repos/{owner}/{repo}/issues/{number}/comments -X POST -f body=...` |
+| Close issue | `gh api repos/{owner}/{repo}/issues/{number} -X PATCH -f state=closed` |
+| Set issue type | Include `-f type=Bug` in the create call (REST API only, not supported by `gh issue create` CLI) |
+
+**Note:** `gh issue create` works for basic issue creation but does **not** support the `--type` flag. Use `gh api` when you need to set issue types.
+
## Workflow
1. **Determine action**: Create, update, or query?
2. **Gather context**: Get repo info, existing labels, milestones if needed
3. **Structure content**: Use appropriate template from [references/templates.md](references/templates.md)
-4. **Execute**: Call the appropriate MCP tool
+4. **Execute**: Use MCP tools for reads, `gh api` for writes
5. **Confirm**: Report the issue URL to user
## Creating Issues
-### Required Parameters
+Use `gh api` to create issues. This supports all parameters including issue types.
-```
-owner: repository owner (org or user)
-repo: repository name
-title: clear, actionable title
-body: structured markdown content
+```bash
+gh api repos/{owner}/{repo}/issues \
+ -X POST \
+ -f title="Issue title" \
+ -f body="Issue body in markdown" \
+ -f type="Bug" \
+ --jq '{number, html_url}'
```
### Optional Parameters
+Add any of these flags to the `gh api` call:
+
```
-labels: ["bug", "enhancement", "documentation", ...]
-assignees: ["username1", "username2"]
-milestone: milestone number (integer)
-type: issue type name (e.g., "Bug", "Feature", "Task", "Epic")
+-f type="Bug" # Issue type (Bug, Feature, Task, Epic, etc.)
+-f labels[]="bug" # Labels (repeat for multiple)
+-f assignees[]="username" # Assignees (repeat for multiple)
+-f milestone=1 # Milestone number
```
-**Issue types** are organization-level metadata. Before using `type`, call `mcp__github__list_issue_types` with the org name to discover available types. If the org has no issue types configured, omit the parameter.
+**Issue types** are organization-level metadata. To discover available types, use:
+```bash
+gh api graphql -f query='{ organization(login: "ORG") { issueTypes(first: 10) { nodes { name } } } }' --jq '.data.organization.issueTypes.nodes[].name'
+```
**Prefer issue types over labels for categorization.** When issue types are available (e.g., Bug, Feature, Task), use the `type` parameter instead of applying equivalent labels like `bug` or `enhancement`. Issue types are the canonical way to categorize issues on GitHub. Only fall back to labels when the org has no issue types configured.
### Title Guidelines
-- Start with type prefix when useful: `[Bug]`, `[Feature]`, `[Docs]`
- Be specific and actionable
- Keep under 72 characters
+- When issue types are set, don't add redundant prefixes like `[Bug]`
- Examples:
- - `[Bug] Login fails with SSO enabled`
- - `[Feature] Add dark mode support`
- - `Add unit tests for auth module`
+ - `Login fails with SSO enabled` (with type=Bug)
+ - `Add dark mode support` (with type=Feature)
+ - `Add unit tests for auth module` (with type=Task)
### Body Structure
@@ -77,14 +95,17 @@ Always use the templates in [references/templates.md](references/templates.md).
## Updating Issues
-Use `mcp__github__update_issue` with:
+Use `gh api` with PATCH:
-```
-owner, repo, issue_number (required)
-title, body, state, labels, assignees, milestone (optional - only changed fields)
+```bash
+gh api repos/{owner}/{repo}/issues/{number} \
+ -X PATCH \
+ -f state=closed \
+ -f title="Updated title" \
+ --jq '{number, html_url}'
```
-State values: `open`, `closed`
+Only include fields you want to change. Available fields: `title`, `body`, `state` (open/closed), `labels`, `assignees`, `milestone`.
## Examples
@@ -92,31 +113,54 @@ State values: `open`, `closed`
**User**: "Create a bug issue - the login page crashes when using SSO"
-**Action**: Call `mcp__github__create_issue` with:
-```json
-{
- "owner": "github",
- "repo": "awesome-copilot",
- "title": "[Bug] Login page crashes when using SSO",
- "body": "## Description\nThe login page crashes when users attempt to authenticate using SSO.\n\n## Steps to Reproduce\n1. Navigate to login page\n2. Click 'Sign in with SSO'\n3. Page crashes\n\n## Expected Behavior\nSSO authentication should complete and redirect to dashboard.\n\n## Actual Behavior\nPage becomes unresponsive and displays error.\n\n## Environment\n- Browser: [To be filled]\n- OS: [To be filled]\n\n## Additional Context\nReported by user.",
- "type": "Bug"
-}
+**Action**:
+```bash
+gh api repos/github/awesome-copilot/issues \
+ -X POST \
+ -f title="Login page crashes when using SSO" \
+ -f type="Bug" \
+ -f body="## Description
+The login page crashes when users attempt to authenticate using SSO.
+
+## Steps to Reproduce
+1. Navigate to login page
+2. Click 'Sign in with SSO'
+3. Page crashes
+
+## Expected Behavior
+SSO authentication should complete and redirect to dashboard.
+
+## Actual Behavior
+Page becomes unresponsive and displays error." \
+ --jq '{number, html_url}'
```
### Example 2: Feature Request
**User**: "Create a feature request for dark mode with high priority"
-**Action**: Call `mcp__github__create_issue` with:
-```json
-{
- "owner": "github",
- "repo": "awesome-copilot",
- "title": "[Feature] Add dark mode support",
- "body": "## Summary\nAdd dark mode theme option for improved user experience and accessibility.\n\n## Motivation\n- Reduces eye strain in low-light environments\n- Increasingly expected by users\n- Improves accessibility\n\n## Proposed Solution\nImplement theme toggle with system preference detection.\n\n## Acceptance Criteria\n- [ ] Toggle switch in settings\n- [ ] Persists user preference\n- [ ] Respects system preference by default\n- [ ] All UI components support both themes\n\n## Alternatives Considered\nNone specified.\n\n## Additional Context\nHigh priority request.",
- "type": "Feature",
- "labels": ["high-priority"]
-}
+**Action**:
+```bash
+gh api repos/github/awesome-copilot/issues \
+ -X POST \
+ -f title="Add dark mode support" \
+ -f type="Feature" \
+ -f labels[]="high-priority" \
+ -f body="## Summary
+Add dark mode theme option for improved user experience and accessibility.
+
+## Motivation
+- Reduces eye strain in low-light environments
+- Increasingly expected by users
+
+## Proposed Solution
+Implement theme toggle with system preference detection.
+
+## Acceptance Criteria
+- [ ] Toggle switch in settings
+- [ ] Persists user preference
+- [ ] Respects system preference by default" \
+ --jq '{number, html_url}'
```
## Common Labels
@@ -148,8 +192,10 @@ The following features require REST or GraphQL APIs beyond the basic MCP tools.
| Capability | When to use | Reference |
|------------|-------------|-----------|
+| Advanced search | Complex queries with boolean logic, date ranges, cross-repo search, issue field filters (`field.name:value`) | [references/search.md](references/search.md) |
| Sub-issues & parent issues | Breaking work into hierarchical tasks | [references/sub-issues.md](references/sub-issues.md) |
| Issue dependencies | Tracking blocked-by / blocking relationships | [references/dependencies.md](references/dependencies.md) |
| Issue types (advanced) | GraphQL operations beyond MCP `list_issue_types` / `type` param | [references/issue-types.md](references/issue-types.md) |
| Projects V2 | Project boards, progress reports, field management | [references/projects.md](references/projects.md) |
| Issue fields | Custom metadata: dates, priority, text, numbers (private preview) | [references/issue-fields.md](references/issue-fields.md) |
+| Images in issues | Embedding images in issue bodies and comments via CLI | [references/images.md](references/images.md) |
diff --git a/skills/github-issues/references/dependencies.md b/skills/github-issues/references/dependencies.md
index 6b3b09c9..6ad7562a 100644
--- a/skills/github-issues/references/dependencies.md
+++ b/skills/github-issues/references/dependencies.md
@@ -44,7 +44,7 @@ mutation {
issueId: "BLOCKED_ISSUE_NODE_ID"
blockingIssueId: "BLOCKING_ISSUE_NODE_ID"
}) {
- blockedByIssue { number title }
+ blockingIssue { number title }
}
}
```
@@ -56,7 +56,7 @@ mutation {
issueId: "BLOCKED_ISSUE_NODE_ID"
blockingIssueId: "BLOCKING_ISSUE_NODE_ID"
}) {
- blockedByIssue { number title }
+ blockingIssue { number title }
}
}
```
diff --git a/skills/github-issues/references/images.md b/skills/github-issues/references/images.md
new file mode 100644
index 00000000..f6dec631
--- /dev/null
+++ b/skills/github-issues/references/images.md
@@ -0,0 +1,116 @@
+# Images in Issues and Comments
+
+How to embed images in GitHub issue bodies and comments programmatically via the CLI.
+
+## Methods (ranked by reliability)
+
+### 1. GitHub Contents API (recommended for private repos)
+
+Push image files to a branch in the same repo, then reference them with a URL that works for authenticated viewers.
+
+**Step 1: Create a branch**
+
+```bash
+# Get the SHA of the default branch
+SHA=$(gh api repos/{owner}/{repo}/git/ref/heads/main --jq '.object.sha')
+
+# Create a new branch
+gh api repos/{owner}/{repo}/git/refs -X POST \
+ -f ref="refs/heads/{username}/images" \
+ -f sha="$SHA"
+```
+
+**Step 2: Upload images via Contents API**
+
+```bash
+# Base64-encode the image and upload
+BASE64=$(base64 -i /path/to/image.png)
+
+gh api repos/{owner}/{repo}/contents/docs/images/my-image.png \
+ -X PUT \
+ -f message="Add image" \
+ -f content="$BASE64" \
+ -f branch="{username}/images" \
+ --jq '.content.path'
+```
+
+Repeat for each image. The Contents API creates a commit per file.
+
+**Step 3: Reference in markdown**
+
+```markdown
+
+```
+
+> **Important:** Use `github.com/{owner}/{repo}/raw/{branch}/{path}` format, NOT `raw.githubusercontent.com`. The `raw.githubusercontent.com` URLs return 404 for private repos. The `github.com/.../raw/...` format works because the browser sends auth cookies when the viewer is logged in and has repo access.
+
+**Pros:** Works for any repo the viewer has access to, images live in version control, no expiration.
+**Cons:** Creates commits, viewers must be authenticated, images won't render in email notifications or for users without repo access.
+
+### 2. Gist hosting (public images only)
+
+Upload images as files in a gist. Only works for images you're comfortable making public.
+
+```bash
+# Create a gist with a placeholder file
+gh gist create --public -f description.md <<< "Image hosting gist"
+
+# Note: gh gist edit does NOT support binary files.
+# You must use the API to add binary content to gists.
+```
+
+> **Limitation:** Gists don't support binary file uploads via the CLI. You'd need to base64-encode and store as text, which won't render as images. Not recommended.
+
+### 3. Browser upload (most reliable rendering)
+
+The most reliable way to get permanent image URLs is through the GitHub web UI:
+
+1. Open the issue/comment in a browser
+2. Drag-drop or paste the image into the comment editor
+3. GitHub generates a permanent `https://github.com/user-attachments/assets/{UUID}` URL
+4. These URLs work for anyone, even without repo access, and render in email notifications
+
+> **Why the API can't do this:** GitHub's `upload/policies/assets` endpoint requires a browser session (CSRF token + cookies). It returns an HTML error page when called with API tokens. There is no public API for generating `user-attachments` URLs.
+
+## Taking screenshots programmatically
+
+Use `puppeteer-core` with local Chrome to screenshot HTML mockups:
+
+```javascript
+const puppeteer = require('puppeteer-core');
+
+const browser = await puppeteer.launch({
+ executablePath: '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
+ defaultViewport: { width: 900, height: 600, deviceScaleFactor: 2 }
+});
+
+const page = await browser.newPage();
+await page.setContent(htmlString);
+
+// Screenshot specific elements
+const elements = await page.$$('.section');
+for (let i = 0; i < elements.length; i++) {
+ await elements[i].screenshot({ path: `mockup-${i + 1}.png` });
+}
+
+await browser.close();
+```
+
+> **Note:** MCP Playwright may not connect to localhost due to network isolation. Use puppeteer-core with a local Chrome installation instead.
+
+## Quick reference
+
+| Method | Private repos | Permanent | No auth needed | API-only |
+|--------|:---:|:---:|:---:|:---:|
+| Contents API + `github.com/raw/` | ✅ | ✅ | ❌ | ✅ |
+| Browser drag-drop (`user-attachments`) | ✅ | ✅ | ✅ | ❌ |
+| `raw.githubusercontent.com` | ❌ (404) | ✅ | ❌ | ✅ |
+| Gist | Public only | ✅ | ✅ | ❌ (no binary) |
+
+## Common pitfalls
+
+- **`raw.githubusercontent.com` returns 404 for private repos** even with a valid token in the URL. GitHub's CDN does not pass auth headers through.
+- **API download URLs are temporary.** URLs returned by `gh api repos/.../contents/...` with `download_url` include a token that expires.
+- **`upload/policies/assets` requires a browser session.** Do not attempt to call this endpoint from the CLI.
+- **Base64 encoding for large files** can hit API payload limits. The Contents API has a ~100MB file size limit but practical limits are lower for base64-encoded payloads.
+- **Email notifications** will not render images that require authentication. If email readability matters, use the browser upload method.
diff --git a/skills/github-issues/references/issue-fields.md b/skills/github-issues/references/issue-fields.md
index d60e4e49..4ab668ce 100644
--- a/skills/github-issues/references/issue-fields.md
+++ b/skills/github-issues/references/issue-fields.md
@@ -125,3 +125,67 @@ mutation {
}
}'
```
+
+## Searching by field values
+
+### GraphQL bulk query (recommended)
+
+The most reliable way to find issues by field value is to fetch issues via GraphQL and filter by `issueFieldValues`. The search qualifier syntax (`field.name:value`) is not yet reliable across all environments.
+
+```bash
+# Find all open P1 issues in a repo
+gh api graphql -H "GraphQL-Features: issue_fields" -f query='
+{
+ repository(owner: "OWNER", name: "REPO") {
+ issues(first: 100, states: OPEN) {
+ nodes {
+ number
+ title
+ updatedAt
+ assignees(first: 3) { nodes { login } }
+ issueFieldValues(first: 10) {
+ nodes {
+ __typename
+ ... on IssueFieldSingleSelectValue {
+ name
+ field { ... on IssueFieldSingleSelect { name } }
+ }
+ }
+ }
+ }
+ }
+ }
+}' --jq '
+ [.data.repository.issues.nodes[] |
+ select(.issueFieldValues.nodes[] |
+ select(.field.name == "Priority" and .name == "P1")
+ ) |
+ {number, title, updatedAt, assignees: [.assignees.nodes[].login]}
+ ]'
+```
+
+**Schema notes for `IssueFieldSingleSelectValue`:**
+- The selected option's display text is in `.name` (not `.value`)
+- Also available: `.color`, `.description`, `.id`
+- The parent field reference is in `.field` (use inline fragment to get the field name)
+
+### Search qualifier syntax (experimental)
+
+Issue fields may also be searchable using dot notation in search queries. This requires `advanced_search=true` on REST or `ISSUE_ADVANCED` search type on GraphQL, but results are inconsistent and may return 0 results even when matching issues exist.
+
+```
+field.priority:P0 # Single-select equals value
+field.target-date:>=2026-04-01 # Date comparison
+has:field.priority # Has any value set
+no:field.priority # Has no value set
+```
+
+Field names use the **slug** (lowercase, hyphens for spaces). For example, "Target Date" becomes `target-date`.
+
+```bash
+# REST API (may not return results in all environments)
+gh api "search/issues?q=repo:owner/repo+field.priority:P0+is:open&advanced_search=true" \
+ --jq '.items[] | "#\(.number): \(.title)"'
+```
+
+> **Warning:** The colon notation (`field:Priority:P1`) is silently ignored. If using search qualifiers, always use dot notation (`field.priority:P1`). However, the GraphQL bulk query approach above is more reliable. See [search.md](search.md) for the full search guide.
diff --git a/skills/github-issues/references/projects.md b/skills/github-issues/references/projects.md
index ce633a18..e373b4e6 100644
--- a/skills/github-issues/references/projects.md
+++ b/skills/github-issues/references/projects.md
@@ -24,16 +24,69 @@ Call `mcp__github__projects_write` with `method: "delete_project_item"`, `projec
## Workflow for project operations
-1. **Find the project** - use `projects_list` with `list_projects` to get the project number and node ID
+1. **Find the project** — see [Finding a project by name](#finding-a-project-by-name) below
2. **Discover fields** - use `projects_list` with `list_project_fields` to get field IDs and option IDs
3. **Find items** - use `projects_list` with `list_project_items` to get item IDs
4. **Mutate** - use `projects_write` to add, update, or delete items
+## Finding a project by name
+
+> **⚠️ Known issue:** `projectsV2(query: "…")` does keyword search, not exact name match, and returns results sorted by recency. Common words like "issue" or "bug" return hundreds of false positives. The actual project may be buried dozens of pages deep.
+
+Use this priority order:
+
+### 1. Direct lookup (if you know the number)
+```bash
+gh api graphql -f query='{
+ organization(login: "ORG") {
+ projectV2(number: 42) { id title }
+ }
+}' --jq '.data.organization.projectV2'
+```
+
+### 2. Reverse lookup from a known issue (most reliable)
+If the user mentions an issue, epic, or milestone that's in the project, query that issue's `projectItems` to discover the project:
+
+```bash
+gh api graphql -f query='{
+ repository(owner: "OWNER", name: "REPO") {
+ issue(number: 123) {
+ projectItems(first: 10) {
+ nodes {
+ id
+ project { number title id }
+ }
+ }
+ }
+ }
+}' --jq '.data.repository.issue.projectItems.nodes[] | {number: .project.number, title: .project.title, id: .project.id}'
+```
+
+This is the most reliable approach for large orgs where name search fails.
+
+### 3. GraphQL name search with client-side filtering (fallback)
+Query a large page and filter client-side for an exact title match:
+
+```bash
+gh api graphql -f query='{
+ organization(login: "ORG") {
+ projectsV2(first: 100, query: "search term") {
+ nodes { number title id }
+ }
+ }
+}' --jq '.data.organization.projectsV2.nodes[] | select(.title | test("(?i)^exact name$"))'
+```
+
+If this returns nothing, paginate with `after` cursor or broaden the regex. Results are sorted by recency so older projects require pagination.
+
+### 4. MCP tool (small orgs only)
+Call `mcp__github__projects_list` with `method: "list_projects"`. This works well for orgs with <50 projects but has no name filter, so you must scan all results.
+
## Project discovery for progress reports
When a user asks for a progress update on a project (e.g., "Give me a progress update for Project X"), follow this workflow:
-1. **Search by name** - call `projects_list` with `list_projects` and scan results for a title matching the user's query. Project names are often informal, so match flexibly (e.g., "issue fields" matches "Issue fields" or "Issue Fields and Types").
+1. **Find the project** — use the [finding a project](#finding-a-project-by-name) strategies above. Ask the user for a known issue number if name search fails.
2. **Discover fields** - call `projects_list` with `list_project_fields` to find the Status field (its options tell you the workflow stages) and any Iteration field (to scope to the current sprint).
@@ -53,23 +106,77 @@ When a user asks for a progress update on a project (e.g., "Give me a progress u
5. **Add context** - if items have sub-issues, include `subIssuesSummary` counts. If items have dependencies, note blocked items and what blocks them.
-**Tip:** For org-level projects, use GraphQL with `organization.projectsV2(first: 20, query: "search term")` to search by name directly, which is faster than listing all projects.
+## OAuth Scope Requirements
-## Using GraphQL directly (advanced)
+| Operation | Required scope |
+|-----------|---------------|
+| Read projects, fields, items | `read:project` |
+| Add/update/delete items, change field values | `project` |
-Required scope: `read:project` for queries, `project` for mutations.
+**Common pitfall:** The default `gh auth` token often only has `read:project`. Mutations will fail with `INSUFFICIENT_SCOPES`. To add the write scope:
-**Find a project:**
-```graphql
-{
- organization(login: "ORG") {
- projectV2(number: 5) { id title }
- }
-}
+```bash
+gh auth refresh -h github.com -s project
```
-**List fields (including single-select options):**
-```graphql
+This triggers a browser-based OAuth flow. You must complete it before mutations will work.
+
+## Finding an Issue's Project Item ID
+
+When you know the issue but need its project item ID (e.g., to update its Status), query from the issue side:
+
+```bash
+gh api graphql -f query='
+{
+ repository(owner: "OWNER", name: "REPO") {
+ issue(number: 123) {
+ projectItems(first: 5) {
+ nodes {
+ id
+ project { title number }
+ fieldValues(first: 10) {
+ nodes {
+ ... on ProjectV2ItemFieldSingleSelectValue {
+ name
+ field { ... on ProjectV2SingleSelectField { name } }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}' --jq '.data.repository.issue.projectItems.nodes'
+```
+
+This returns the item ID, project info, and current field values in one query.
+
+## Using GraphQL via gh api (recommended)
+
+Use `gh api graphql` to run GraphQL queries and mutations. This is more reliable than MCP tools for write operations.
+
+**Find a project and its Status field options:**
+```bash
+gh api graphql -f query='
+{
+ organization(login: "ORG") {
+ projectV2(number: 5) {
+ id
+ title
+ field(name: "Status") {
+ ... on ProjectV2SingleSelectField {
+ id
+ options { id name }
+ }
+ }
+ }
+ }
+}' --jq '.data.organization.projectV2'
+```
+
+**List all fields (including iterations):**
+```bash
+gh api graphql -f query='
{
node(id: "PROJECT_ID") {
... on ProjectV2 {
@@ -82,23 +189,12 @@ Required scope: `read:project` for queries, `project` for mutations.
}
}
}
-}
+}' --jq '.data.node.fields.nodes'
```
-**Add an item:**
-```graphql
-mutation {
- addProjectV2ItemById(input: {
- projectId: "PROJECT_ID"
- contentId: "ISSUE_OR_PR_NODE_ID"
- }) {
- item { id }
- }
-}
-```
-
-**Update a field value:**
-```graphql
+**Update a field value (e.g., set Status to "In Progress"):**
+```bash
+gh api graphql -f query='
mutation {
updateProjectV2ItemFieldValue(input: {
projectId: "PROJECT_ID"
@@ -108,13 +204,27 @@ mutation {
}) {
projectV2Item { id }
}
-}
+}'
```
Value accepts one of: `text`, `number`, `date`, `singleSelectOptionId`, `iterationId`.
+**Add an item:**
+```bash
+gh api graphql -f query='
+mutation {
+ addProjectV2ItemById(input: {
+ projectId: "PROJECT_ID"
+ contentId: "ISSUE_OR_PR_NODE_ID"
+ }) {
+ item { id }
+ }
+}'
+```
+
**Delete an item:**
-```graphql
+```bash
+gh api graphql -f query='
mutation {
deleteProjectV2Item(input: {
projectId: "PROJECT_ID"
@@ -122,5 +232,42 @@ mutation {
}) {
deletedItemId
}
-}
+}'
+```
+
+## End-to-End Example: Set Issue Status to "In Progress"
+
+```bash
+# 1. Get the issue's project item ID, project ID, and current status
+gh api graphql -f query='{
+ repository(owner: "github", name: "planning-tracking") {
+ issue(number: 2574) {
+ projectItems(first: 1) {
+ nodes { id project { id title } }
+ }
+ }
+ }
+}' --jq '.data.repository.issue.projectItems.nodes[0]'
+
+# 2. Get the Status field ID and "In Progress" option ID
+gh api graphql -f query='{
+ node(id: "PROJECT_ID") {
+ ... on ProjectV2 {
+ field(name: "Status") {
+ ... on ProjectV2SingleSelectField { id options { id name } }
+ }
+ }
+ }
+}' --jq '.data.node.field'
+
+# 3. Update the status
+gh api graphql -f query='mutation {
+ updateProjectV2ItemFieldValue(input: {
+ projectId: "PROJECT_ID"
+ itemId: "ITEM_ID"
+ fieldId: "FIELD_ID"
+ value: { singleSelectOptionId: "IN_PROGRESS_OPTION_ID" }
+ }) { projectV2Item { id } }
+}'
+```
diff --git a/skills/github-issues/references/search.md b/skills/github-issues/references/search.md
new file mode 100644
index 00000000..9e08efaf
--- /dev/null
+++ b/skills/github-issues/references/search.md
@@ -0,0 +1,231 @@
+# Advanced Issue Search
+
+The `search_issues` MCP tool uses GitHub's issue search query format for cross-repo searches, supporting implicit-AND queries, date ranges, and metadata filters (but not explicit OR/NOT operators).
+
+## When to Use Search vs List vs Advanced Search
+
+There are three ways to find issues, each with different capabilities:
+
+| Capability | `list_issues` (MCP) | `search_issues` (MCP) | Advanced search (`gh api`) |
+|-----------|---------------------|----------------------|---------------------------|
+| **Scope** | Single repo only | Cross-repo, cross-org | Cross-repo, cross-org |
+| **Issue field filters** (`field.priority:P0`) | No | No | **Yes** (dot notation) |
+| **Issue type filter** (`type:Bug`) | No | Yes | Yes |
+| **Boolean logic** (AND/OR/NOT, nesting) | No | Yes (implicit AND only) | **Yes** (explicit AND/OR/NOT) |
+| **Label/state/date filters** | Yes | Yes | Yes |
+| **Assignee/author/mentions** | No | Yes | Yes |
+| **Negation** (`-label:x`, `no:label`) | No | Yes | Yes |
+| **Text search** (title/body/comments) | No | Yes | Yes |
+| **`since` filter** | Yes | No | No |
+| **Result limit** | No cap (paginate all) | 1,000 max | 1,000 max |
+| **How to call** | MCP tool directly | MCP tool directly | `gh api` with `advanced_search=true` |
+
+**Decision guide:**
+- **Single repo, simple filters (state, labels, recent updates):** use `list_issues`
+- **Cross-repo, text search, author/assignee, issue types:** use `search_issues`
+- **Issue field values (Priority, dates, custom fields) or complex boolean logic:** use `gh api` with `advanced_search=true`
+
+## Query Syntax
+
+The `query` parameter is a string of search terms and qualifiers. A space between terms is implicit AND.
+
+### Scoping
+
+```
+repo:owner/repo # Single repo (auto-added if you pass owner+repo params)
+org:github # All repos in an org
+user:octocat # All repos owned by user
+in:title # Search only in title
+in:body # Search only in body
+in:comments # Search only in comments
+```
+
+### State & Close Reason
+
+```
+is:open # Open issues (auto-added: is:issue)
+is:closed # Closed issues
+reason:completed # Closed as completed
+reason:"not planned" # Closed as not planned
+```
+
+### People
+
+```
+author:username # Created by
+assignee:username # Assigned to
+mentions:username # Mentions user
+commenter:username # Has comment from
+involves:username # Author OR assignee OR mentioned OR commenter
+author:@me # Current authenticated user
+team:org/team # Team mentioned
+```
+
+### Labels, Milestones, Projects, Types
+
+```
+label:"bug" # Has label (quote multi-word labels)
+label:bug label:priority # Has BOTH labels (AND)
+label:bug,enhancement # Has EITHER label (OR)
+-label:wontfix # Does NOT have label
+milestone:"v2.0" # In milestone
+project:github/57 # In project board
+type:"Bug" # Issue type
+```
+
+### Missing Metadata
+
+```
+no:label # No labels assigned
+no:milestone # No milestone
+no:assignee # Unassigned
+no:project # Not in any project
+```
+
+### Dates
+
+All date qualifiers support `>`, `<`, `>=`, `<=`, and range (`..`) operators with ISO 8601 format:
+
+```
+created:>2026-01-01 # Created after Jan 1
+updated:>=2026-03-01 # Updated since Mar 1
+closed:2026-01-01..2026-02-01 # Closed in January
+created:<2026-01-01 # Created before Jan 1
+```
+
+### Linked Content
+
+```
+linked:pr # Issue has a linked PR
+-linked:pr # Issues not yet linked to any PR
+linked:issue # PR is linked to an issue
+```
+
+### Numeric Filters
+
+```
+comments:>10 # More than 10 comments
+comments:0 # No comments
+interactions:>100 # Reactions + comments > 100
+reactions:>50 # More than 50 reactions
+```
+
+### Boolean Logic & Nesting
+
+Use `AND`, `OR`, and parentheses (up to 5 levels deep, max 5 operators):
+
+```
+label:bug AND assignee:octocat
+assignee:octocat OR assignee:hubot
+(type:"Bug" AND label:P1) OR (type:"Feature" AND label:P1)
+-author:app/dependabot # Exclude bot issues
+```
+
+A space between terms without an explicit operator is treated as AND.
+
+## Common Query Patterns
+
+**Unassigned bugs:**
+```
+repo:owner/repo type:"Bug" no:assignee is:open
+```
+
+**Issues closed this week:**
+```
+repo:owner/repo is:closed closed:>=2026-03-01
+```
+
+**Stale open issues (no updates in 90 days):**
+```
+repo:owner/repo is:open updated:<2026-01-01
+```
+
+**Open issues without a linked PR (needs work):**
+```
+repo:owner/repo is:open -linked:pr
+```
+
+**Issues I'm involved in across an org:**
+```
+org:github involves:@me is:open
+```
+
+**High-activity issues:**
+```
+repo:owner/repo is:open comments:>20
+```
+
+**Issues by type and priority label:**
+```
+repo:owner/repo type:"Epic" label:P1 is:open
+```
+
+## Issue Field Search
+
+> **Reliability warning:** The `field.name:value` search qualifier syntax is experimental and may return 0 results even when matching issues exist. For reliable filtering by field values, use the GraphQL bulk query approach documented in [issue-fields.md](issue-fields.md#searching-by-field-values).
+
+Issue fields can theoretically be searched via the `field.name:value` qualifier using **advanced search mode**. This works in the web UI but results from the API are inconsistent.
+
+### REST API
+
+Add `advanced_search=true` as a query parameter:
+
+```bash
+gh api "search/issues?q=org:github+field.priority:P0+type:Epic+is:open&advanced_search=true" \
+ --jq '.items[] | "#\(.number): \(.title)"'
+```
+
+### GraphQL
+
+Use `type: ISSUE_ADVANCED` instead of `type: ISSUE`:
+
+```graphql
+{
+ search(query: "org:github field.priority:P0 type:Epic is:open", type: ISSUE_ADVANCED, first: 10) {
+ issueCount
+ nodes {
+ ... on Issue { number title }
+ }
+ }
+}
+```
+
+### Issue Field Qualifiers
+
+The syntax uses **dot notation** with the field's slug name (lowercase, hyphens for spaces):
+
+```
+field.priority:P0 # Single-select field equals value
+field.priority:P1 # Different option value
+field.target-date:>=2026-04-01 # Date comparison
+has:field.priority # Has any value set
+no:field.priority # Has no value set
+```
+
+**MCP limitation:** The `search_issues` MCP tool does not pass `advanced_search=true`. You must use `gh api` directly for issue field searches.
+
+### Common Field Search Patterns
+
+**P0 epics across an org:**
+```
+org:github field.priority:P0 type:Epic is:open
+```
+
+**Issues with a target date this quarter:**
+```
+org:github field.target-date:>=2026-04-01 field.target-date:<=2026-06-30 is:open
+```
+
+**Open bugs missing priority:**
+```
+org:github no:field.priority type:Bug is:open
+```
+
+## Limitations
+
+- Query text: max **256 characters** (excluding operators/qualifiers)
+- Boolean operators: max **5** AND/OR/NOT per query
+- Results: max **1,000** total (use `list_issues` if you need all issues)
+- Repo scan: searches up to **4,000** matching repositories
+- Rate limit: **30 requests/minute** for authenticated search
+- Issue field search requires `advanced_search=true` (REST) or `ISSUE_ADVANCED` (GraphQL); not available through MCP `search_issues`
diff --git a/skills/github-issues/references/sub-issues.md b/skills/github-issues/references/sub-issues.md
index 96577f9d..aac288e6 100644
--- a/skills/github-issues/references/sub-issues.md
+++ b/skills/github-issues/references/sub-issues.md
@@ -2,46 +2,64 @@
Sub-issues let you break down work into hierarchical tasks. Each parent issue can have up to 100 sub-issues, nested up to 8 levels deep. Sub-issues can span repositories within the same owner.
+## Recommended Workflow
+
+The simplest way to create a sub-issue is **two steps**: create the issue, then link it.
+
+```bash
+# Step 1: Create the issue and capture its numeric ID
+ISSUE_ID=$(gh api repos/{owner}/{repo}/issues \
+ -X POST \
+ -f title="Sub-task title" \
+ -f body="Description" \
+ --jq '.id')
+
+# Step 2: Link it as a sub-issue of the parent
+# IMPORTANT: sub_issue_id must be an integer. Use --input (not -f) to send JSON.
+echo "{\"sub_issue_id\": $ISSUE_ID}" | gh api repos/{owner}/{repo}/issues/{parent_number}/sub_issues -X POST --input -
+```
+
+**Why `--input` instead of `-f`?** The `gh api -f` flag sends all values as strings, but the API requires `sub_issue_id` as an integer. Using `-f sub_issue_id=12345` will return a 422 error.
+
+Alternatively, use GraphQL `createIssue` with `parentIssueId` to do it in one step (see GraphQL section below).
+
## Using MCP tools
**List sub-issues:**
Call `mcp__github__issue_read` with `method: "get_sub_issues"`, `owner`, `repo`, and `issue_number`.
**Create an issue as a sub-issue:**
-There is no MCP tool for creating sub-issues directly. Use REST or GraphQL (see below).
+There is no MCP tool for creating sub-issues directly. Use the workflow above or GraphQL.
## Using REST API
**List sub-issues:**
-```
-GET /repos/{owner}/{repo}/issues/{issue_number}/sub_issues
+```bash
+gh api repos/{owner}/{repo}/issues/{issue_number}/sub_issues
```
**Get parent issue:**
-```
-GET /repos/{owner}/{repo}/issues/{issue_number}/parent
+```bash
+gh api repos/{owner}/{repo}/issues/{issue_number}/parent
```
**Add an existing issue as a sub-issue:**
-```
-POST /repos/{owner}/{repo}/issues/{issue_number}/sub_issues
-Body: { "sub_issue_id": 12345 }
+```bash
+# sub_issue_id is the numeric issue ID (not the issue number)
+# Get it from the .id field when creating or fetching an issue
+echo '{"sub_issue_id": 12345}' | gh api repos/{owner}/{repo}/issues/{parent_number}/sub_issues -X POST --input -
```
-The `sub_issue_id` is the numeric issue **ID** (not the issue number). Get it from the issue's `id` field in any API response.
-
-To move a sub-issue that already has a parent, add `"replace_parent": true`.
+To move a sub-issue that already has a parent, add `"replace_parent": true` to the JSON body.
**Remove a sub-issue:**
-```
-DELETE /repos/{owner}/{repo}/issues/{issue_number}/sub_issue
-Body: { "sub_issue_id": 12345 }
+```bash
+echo '{"sub_issue_id": 12345}' | gh api repos/{owner}/{repo}/issues/{parent_number}/sub_issue -X DELETE --input -
```
**Reprioritize a sub-issue:**
-```
-PATCH /repos/{owner}/{repo}/issues/{issue_number}/sub_issues/priority
-Body: { "sub_issue_id": 6, "after_id": 5 }
+```bash
+echo '{"sub_issue_id": 6, "after_id": 5}' | gh api repos/{owner}/{repo}/issues/{parent_number}/sub_issues/priority -X PATCH --input -
```
Use `after_id` or `before_id` to position the sub-issue relative to another.
diff --git a/skills/migrating-oracle-to-postgres-stored-procedures/SKILL.md b/skills/migrating-oracle-to-postgres-stored-procedures/SKILL.md
new file mode 100644
index 00000000..f6363c31
--- /dev/null
+++ b/skills/migrating-oracle-to-postgres-stored-procedures/SKILL.md
@@ -0,0 +1,42 @@
+---
+name: migrating-oracle-to-postgres-stored-procedures
+description: 'Migrates Oracle PL/SQL stored procedures to PostgreSQL PL/pgSQL. Translates Oracle-specific syntax, preserves method signatures and type-anchored parameters, leverages orafce where appropriate, and applies COLLATE "C" for Oracle-compatible text sorting. Use when converting Oracle stored procedures or functions to PostgreSQL equivalents during a database migration.'
+---
+
+# Migrating Stored Procedures from Oracle to PostgreSQL
+
+Translate Oracle PL/SQL stored procedures and functions to PostgreSQL PL/pgSQL equivalents.
+
+## Workflow
+
+```
+Progress:
+- [ ] Step 1: Read the Oracle source procedure
+- [ ] Step 2: Translate to PostgreSQL PL/pgSQL
+- [ ] Step 3: Write the migrated procedure to Postgres output directory
+```
+
+**Step 1: Read the Oracle source procedure**
+
+Read the Oracle stored procedure from `.github/oracle-to-postgres-migration/DDL/Oracle/Procedures and Functions/`. Consult the Oracle table/view definitions at `.github/oracle-to-postgres-migration/DDL/Oracle/Tables and Views/` for type resolution.
+
+**Step 2: Translate to PostgreSQL PL/pgSQL**
+
+Apply these translation rules:
+
+- Translate all Oracle-specific syntax to PostgreSQL equivalents.
+- Preserve original functionality and control flow logic.
+- Keep type-anchored input parameters (e.g., `PARAM_NAME IN table_name.column_name%TYPE`).
+- Use explicit types (`NUMERIC`, `VARCHAR`, `INTEGER`) for output parameters passed to other procedures β do not type-anchor these.
+- Do not alter method signatures.
+- Do not prefix object names with schema names unless already present in the Oracle source.
+- Leave exception handling and rollback logic unchanged.
+- Do not generate `COMMENT` or `GRANT` statements.
+- Use `COLLATE "C"` when ordering by text fields for Oracle-compatible sorting.
+- Leverage the `orafce` extension when it improves clarity or fidelity.
+
+Consult the PostgreSQL table/view definitions at `.github/oracle-to-postgres-migration/DDL/Postgres/Tables and Views/` for target schema details.
+
+**Step 3: Write the migrated procedure to Postgres output directory**
+
+Place each migrated procedure in its own file under `.github/oracle-to-postgres-migration/DDL/Postgres/Procedures and Functions/{PACKAGE_NAME_IF_APPLICABLE}/`. One procedure per file.
diff --git a/skills/napkin/SKILL.md b/skills/napkin/SKILL.md
new file mode 100644
index 00000000..aab90cbe
--- /dev/null
+++ b/skills/napkin/SKILL.md
@@ -0,0 +1,154 @@
+---
+name: napkin
+description: 'Visual whiteboard collaboration for Copilot CLI. Creates an interactive whiteboard that opens in your browser — draw, sketch, add sticky notes, then share everything back with Copilot. Copilot sees your drawings and text, and responds with analysis, suggestions, and ideas.'
+---
+
+# Napkin — Visual Whiteboard for Copilot CLI
+
+Napkin gives users a browser-based whiteboard where they can draw, sketch, and add sticky notes to think through ideas visually. The agent reads back the whiteboard contents (via a PNG snapshot and optional JSON data) and responds conversationally with analysis, suggestions, and next steps.
+
+The target audience is lawyers, PMs, and business stakeholders — not software developers. Keep everything approachable and jargon-free.
+
+---
+
+## Activation
+
+When the user invokes this skill — saying things like "let's napkin," "open a napkin," "start a whiteboard," or using the slash command — do the following:
+
+1. **Copy the bundled HTML template** from the skill assets to the user's Desktop.
+ - The template lives at `assets/napkin.html` relative to this SKILL.md file.
+ - Copy it to `~/Desktop/napkin.html`.
+ - If `~/Desktop/napkin.html` already exists, ask the user whether they want to open the existing one or start fresh before overwriting.
+
+2. **Open it in the default browser:**
+ - macOS: `open ~/Desktop/napkin.html`
+ - Linux: `xdg-open ~/Desktop/napkin.html`
+ - Windows: `start ~/Desktop/napkin.html`
+
+3. **Tell the user what to do next.** Say something warm and simple:
+
+ ```
+ Your napkin is open in your browser!
+
+   Draw, sketch, or add sticky notes — whatever helps you think through your idea.
+
+ When you're ready for my input, click the green "Share with Copilot" button on the whiteboard, then come back here and say "check the napkin."
+ ```
+
+---
+
+## Reading the Napkin
+
+When the user says "check the napkin," "look at the napkin," "what do you think," "read my napkin," or anything similar, follow these steps:
+
+### Step 1 — Read the PNG snapshot (primary)
+
+Look for a PNG file called `napkin-snapshot.png`. Check these locations in order (the browser saves it to the user's default download folder, which varies):
+
+1. `~/Downloads/napkin-snapshot.png`
+2. `~/Desktop/napkin-snapshot.png`
+
+Use the `view` tool to read the PNG. This sends the image as base64-encoded data to the model, which can visually interpret it. The PNG is the **primary** way the agent understands what the user drew — it captures freehand sketches, arrows, spatial layout, annotations, circled or crossed-out items, and anything else on the canvas.
+
+If the PNG is not found in either location, do NOT silently skip it. Instead, tell the user:
+
+```
+I don't see a snapshot from your napkin yet. Here's what to do:
+
+1. Go to your whiteboard in the browser
+2. Click the green "Share with Copilot" button
+3. Come back here and say "check the napkin" again
+
+The button saves a screenshot that I can look at.
+```
+
+### Step 2 — Read the clipboard for structured JSON (supplementary)
+
+Also try to grab structured JSON data from the system clipboard. The whiteboard copies this automatically alongside the PNG.
+
+- macOS: `pbpaste`
+- Linux: `xclip -selection clipboard -o`
+- Windows: `powershell -command "Get-Clipboard"`
+
+The JSON contains the exact text content of sticky notes and text labels, their positions, and their colors. This supplements the PNG by giving you precise text that might be hard to read from a screenshot.
+
+If the clipboard doesn't contain JSON data, that's fine — the PNG alone gives the model plenty to work with. Do not treat a missing clipboard as an error.
+
+### Step 3 — Interpret both sources together
+
+Synthesize the visual snapshot and the structured text into a coherent understanding of what the user is thinking or planning:
+
+- **From the PNG:** Describe what you see — sketches, diagrams, flowcharts, groupings, arrows, spatial layout, annotations, circled items, crossed-out items, emphasis marks.
+- **From the JSON:** Read the exact text content of sticky notes and labels, noting their positions and colors.
+- **Combine both** into a single, conversational interpretation.
+
+### Step 4 — Respond conversationally
+
+Do not dump raw data or a technical summary. Respond as a collaborator who looked at someone's whiteboard sketch. Examples:
+
+- "I can see you've sketched out a three-stage process — it looks like you're thinking about [X] flowing into [Y] and then [Z]. The sticky note in the corner says '[text]' — is that a concern you want me to address?"
+- "It looks like you've grouped these four ideas together on the left side and separated them from the two items on the right. Are you thinking of these as two different categories?"
+- "I see you drew arrows connecting [A] to [B] to [C] — is this the workflow you're envisioning?"
+
+### Step 5 — Ask what's next
+
+Always end by offering a next step:
+
+- "Want me to build on this?"
+- "Should I turn this into a structured document?"
+- "Want me to add my suggestions to the napkin?"
+
+---
+
+## Responding on the Napkin
+
+When the user wants the agent to add content back to the whiteboard:
+
+- The agent **cannot** directly modify the HTML file's canvas state — that's managed by JavaScript running in the browser.
+- Instead, offer practical alternatives:
+ - Provide the response right here in the CLI, and suggest the user add it to the napkin manually.
+ - Offer to create a separate document (markdown, memo, checklist, etc.) based on what was interpreted from the napkin.
+ - If it makes sense, create an updated copy of `napkin.html` with pre-loaded content.
+
+---
+
+## Tone and Style
+
+- Use the same approachable, non-technical tone as the noob-mode skill.
+- Never use developer jargon without explaining it in plain English.
+- Treat the napkin as a creative, collaborative space — not a formal input mechanism.
+- Be encouraging about the user's sketches regardless of artistic quality.
+- Frame responses as "building on your thinking," not "analyzing your input."
+
+---
+
+## Error Handling
+
+**PNG snapshot not found:**
+
+```
+I don't see a snapshot from your napkin yet. Here's what to do:
+
+1. Go to your whiteboard in the browser
+2. Click the green "Share with Copilot" button
+3. Come back here and say "check the napkin" again
+
+The button saves a screenshot that I can look at.
+```
+
+**Whiteboard file doesn't exist on Desktop:**
+
+```
+It looks like we haven't started a napkin yet. Want me to open one for you?
+```
+
+---
+
+## Important Notes
+
+- The PNG interpretation is the **primary** channel. Multimodal models can read and interpret the base64 image data returned by the `view` tool.
+- The JSON clipboard data is **supplementary** — it provides precise text but does not capture freehand drawings.
+- Always check for the PNG first. If it isn't found, prompt the user to click "Share with Copilot."
+- If the clipboard doesn't have JSON data, proceed with the PNG alone.
+- The HTML template is located at `assets/napkin.html` relative to this SKILL.md file.
+- If the noob-mode skill is also active, use its risk indicator format (green/yellow/red) when requesting file or bash permissions.
diff --git a/skills/napkin/assets/napkin.html b/skills/napkin/assets/napkin.html
new file mode 100644
index 00000000..8a934ecb
--- /dev/null
+++ b/skills/napkin/assets/napkin.html
@@ -0,0 +1,2019 @@
+
+
+
+
+
+Napkin β Whiteboard for Copilot
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Welcome to Napkin!
+
Your whiteboard for brainstorming with Copilot.
+
+
+
1
+
Draw, sketch, or add sticky notes — whatever helps you think
+
+
+
2
+
When you're ready, click "Share with Copilot" (the green button)
+
+
+
3
+
Go back to your terminal and say "check the napkin"
+
+
+
4
+
Copilot will look at your whiteboard and respond
+
+
+
That's it. Let's go!
+
+
+
+
+
+
+
+
✔️
+
Shared with Copilot!
+
+ 💾 A screenshot was saved (check your Downloads or Desktop).
+ 📋 The text content was copied to your clipboard.
+ Go back to Copilot CLI and say:
+ "check the napkin"
+
+
+
+
+
+
+
+
+
Keyboard Shortcuts
+
Select / MoveV
+
PenP
+
RectangleR
+
CircleC
+
ArrowA
+
LineL
+
TextT
+
Sticky NoteN
+
EraserE
+
UndoCtrl/Cmd+Z
+
RedoCtrl/Cmd+Shift+Z
+
Pan canvasSpace+Drag
+
+
+
+
+
+ 100%
+
+
+
+
+
+
+
+
+
+
diff --git a/skills/napkin/assets/step1-activate.svg b/skills/napkin/assets/step1-activate.svg
new file mode 100644
index 00000000..548ac4a5
--- /dev/null
+++ b/skills/napkin/assets/step1-activate.svg
@@ -0,0 +1,107 @@
+
\ No newline at end of file
diff --git a/skills/napkin/assets/step2-whiteboard.svg b/skills/napkin/assets/step2-whiteboard.svg
new file mode 100644
index 00000000..e8ca67e6
--- /dev/null
+++ b/skills/napkin/assets/step2-whiteboard.svg
@@ -0,0 +1,157 @@
+
\ No newline at end of file
diff --git a/skills/napkin/assets/step3-draw.svg b/skills/napkin/assets/step3-draw.svg
new file mode 100644
index 00000000..ef836dfc
--- /dev/null
+++ b/skills/napkin/assets/step3-draw.svg
@@ -0,0 +1,143 @@
+
\ No newline at end of file
diff --git a/skills/napkin/assets/step4-share.svg b/skills/napkin/assets/step4-share.svg
new file mode 100644
index 00000000..b24b854b
--- /dev/null
+++ b/skills/napkin/assets/step4-share.svg
@@ -0,0 +1,98 @@
+
\ No newline at end of file
diff --git a/skills/napkin/assets/step5-response.svg b/skills/napkin/assets/step5-response.svg
new file mode 100644
index 00000000..8fa643b8
--- /dev/null
+++ b/skills/napkin/assets/step5-response.svg
@@ -0,0 +1,112 @@
+
\ No newline at end of file
diff --git a/skills/planning-oracle-to-postgres-migration-integration-testing/SKILL.md b/skills/planning-oracle-to-postgres-migration-integration-testing/SKILL.md
new file mode 100644
index 00000000..448b9555
--- /dev/null
+++ b/skills/planning-oracle-to-postgres-migration-integration-testing/SKILL.md
@@ -0,0 +1,44 @@
+---
+name: planning-oracle-to-postgres-migration-integration-testing
+description: 'Creates an integration testing plan for .NET data access artifacts during Oracle-to-PostgreSQL database migrations. Analyzes a single project to identify repositories, DAOs, and service layers that interact with the database, then produces a structured testing plan. Use when planning integration test coverage for a migrated project, identifying which data access methods need tests, or preparing for Oracle-to-PostgreSQL migration validation.'
+---
+
+# Planning Integration Testing for Oracle-to-PostgreSQL Migration
+
+Analyze a single target project to identify data access artifacts that require integration testing, then produce a structured, actionable testing plan.
+
+## Workflow
+
+```
+Progress:
+- [ ] Step 1: Identify data access artifacts
+- [ ] Step 2: Classify testing priorities
+- [ ] Step 3: Write the testing plan
+```
+
+**Step 1: Identify data access artifacts**
+
+Scope to the target project only. Find classes and methods that interact directly with the database — repositories, DAOs, stored procedure callers, service layers performing CRUD operations.
+
+**Step 2: Classify testing priorities**
+
+Rank artifacts by migration risk. Prioritize methods that use Oracle-specific features (refcursors, `TO_CHAR`, implicit type coercion, `NO_DATA_FOUND`) over simple CRUD.
+
+**Step 3: Write the testing plan**
+
+Write a markdown plan covering:
+- List of testable artifacts with method signatures
+- Recommended test cases per artifact
+- Seed data requirements
+- Known Oracle/PostgreSQL behavioral differences to validate
+
+## Output
+
+Write the plan to: `.github/oracle-to-postgres-migration/Reports/{TARGET_PROJECT} Integration Testing Plan.md`
+
+## Key Constraints
+
+- **Single project scope** — only plan tests for artifacts within the target project.
+- **Database interactions only** — skip business logic that does not touch the database.
+- **Oracle is the golden source** — tests should capture Oracle's expected behavior for comparison against PostgreSQL.
+- **No multi-connection harnessing** — migrated applications are copied and renamed (e.g., `MyApp.Postgres`), so each instance targets one database.
diff --git a/skills/reviewing-oracle-to-postgres-migration/SKILL.md b/skills/reviewing-oracle-to-postgres-migration/SKILL.md
new file mode 100644
index 00000000..7f8dee87
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: reviewing-oracle-to-postgres-migration
+description: 'Identifies Oracle-to-PostgreSQL migration risks by cross-referencing code against known behavioral differences (empty strings, refcursors, type coercion, sorting, timestamps, concurrent transactions, etc.). Use when planning a database migration, reviewing migration artifacts, or validating that integration tests cover Oracle/PostgreSQL differences.'
+---
+
+# Oracle-to-PostgreSQL Database Migration
+
+Surfaces migration risks and validates migration work against known Oracle/PostgreSQL behavioral differences documented in the `references/` folder.
+
+## When to use
+
+1. **Planning** — Before starting migration work on a procedure, trigger, query, or refcursor client. Identify which reference insights apply so risks are addressed up front.
+2. **Validating** — After migration work is done, confirm every applicable insight was addressed and integration tests cover the new PostgreSQL semantics.
+
+## Workflow
+
+Determine the task type:
+
+**Planning a migration?** Follow the risk assessment workflow.
+**Validating completed work?** Follow the validation workflow.
+
+### Risk assessment workflow (planning)
+
+```
+Risk Assessment:
+- [ ] Step 1: Identify the migration scope
+- [ ] Step 2: Screen each insight for applicability
+- [ ] Step 3: Document risks and recommended actions
+```
+
+**Step 1: Identify the migration scope**
+
+List the affected database objects (procedures, triggers, queries, views) and the application code that calls them.
+
+**Step 2: Screen each insight for applicability**
+
+Review the reference index in [references/REFERENCE.md](references/REFERENCE.md). For each entry, determine whether the migration scope contains patterns affected by that insight. Read the full reference file only when the insight is potentially relevant.
+
+**Step 3: Document risks and recommended actions**
+
+For each applicable insight, note the specific risk and the recommended fix pattern from the reference file. Flag any insight that requires a design decision (e.g., whether to preserve Oracle empty-string-as-NULL semantics or adopt PostgreSQL behavior).
+
+### Validation workflow (post-migration)
+
+```
+Validation:
+- [ ] Step 1: Map the migration artifact
+- [ ] Step 2: Cross-check applicable insights
+- [ ] Step 3: Verify integration test coverage
+- [ ] Step 4: Gate the result
+```
+
+**Step 1: Map the migration artifact**
+
+Identify the migrated object and summarize the change set.
+
+**Step 2: Cross-check applicable insights**
+
+For each reference in [references/REFERENCE.md](references/REFERENCE.md), confirm the behavior or test requirement is acknowledged and addressed in the migration work.
+
+**Step 3: Verify integration test coverage**
+
+Confirm tests exercise both the happy path and the failure scenarios highlighted in applicable insights (exceptions, sorting, refcursor consumption, concurrent transactions, timestamps, etc.).
+
+**Step 4: Gate the result**
+
+Return a checklist asserting each applicable insight was addressed, migration scripts run, and integration tests pass.
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/REFERENCE.md b/skills/reviewing-oracle-to-postgres-migration/references/REFERENCE.md
new file mode 100644
index 00000000..5dc9bace
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/REFERENCE.md
@@ -0,0 +1,13 @@
+# Reference Index
+
+| File | Brief description |
+| --- | --- |
+| [empty-strings-handling.md](empty-strings-handling.md) | Oracle treats '' as NULL; PostgreSQL keeps empty strings distinct — patterns to align behavior in code, tests, and migrations. |
+| [no-data-found-exceptions.md](no-data-found-exceptions.md) | Oracle SELECT INTO raises "no data found"; PostgreSQL doesn't — add explicit NOT FOUND handling to mirror Oracle behavior. |
+| [oracle-parentheses-from-clause.md](oracle-parentheses-from-clause.md) | Oracle allows `FROM(TABLE_NAME)` syntax; PostgreSQL requires `FROM TABLE_NAME` — remove unnecessary parentheses around table names. |
+| [oracle-to-postgres-sorting.md](oracle-to-postgres-sorting.md) | How to preserve Oracle-like ordering in PostgreSQL using COLLATE "C" and DISTINCT wrapper patterns. |
+| [oracle-to-postgres-to-char-numeric.md](oracle-to-postgres-to-char-numeric.md) | Oracle allows TO_CHAR(numeric) without format; PostgreSQL requires format string — use CAST(numeric AS TEXT) instead. |
+| [oracle-to-postgres-type-coercion.md](oracle-to-postgres-type-coercion.md) | PostgreSQL strict type checks vs. Oracle implicit coercion — fix comparison errors by quoting or casting literals. |
+| [postgres-concurrent-transactions.md](postgres-concurrent-transactions.md) | PostgreSQL allows only one active command per connection — materialize results or use separate connections to avoid concurrent operation errors. |
+| [postgres-refcursor-handling.md](postgres-refcursor-handling.md) | Differences in refcursor handling; PostgreSQL requires fetching by cursor name — C# patterns to unwrap and read results. |
+| [oracle-to-postgres-timestamp-timezone.md](oracle-to-postgres-timestamp-timezone.md) | CURRENT_TIMESTAMP / NOW() return UTC-normalised timestamptz in PostgreSQL; Npgsql surfaces DateTime.Kind=Unspecified — force UTC at connection open and in application code. |
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/empty-strings-handling.md b/skills/reviewing-oracle-to-postgres-migration/references/empty-strings-handling.md
new file mode 100644
index 00000000..c6a82155
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/empty-strings-handling.md
@@ -0,0 +1,69 @@
+# Oracle to PostgreSQL: Empty String Handling Differences
+
+## Problem
+
+Oracle automatically converts empty strings (`''`) to `NULL` in VARCHAR2 columns. PostgreSQL preserves empty strings as distinct from `NULL`. This difference can cause application logic errors and test failures during migration.
+
+## Behavior Comparison
+
+**Oracle:**
+- Empty string (`''`) is **always** treated as `NULL` in VARCHAR2 columns
+- `WHERE column = ''` never matches rows; use `WHERE column IS NULL`
+- Cannot distinguish between explicit empty string and `NULL`
+
+**PostgreSQL:**
+- Empty string (`''`) and `NULL` are **distinct** values
+- `WHERE column = ''` matches empty strings
+- `WHERE column IS NULL` matches `NULL` values
+
+## Code Example
+
+```sql
+-- Oracle behavior
+INSERT INTO table (varchar_column) VALUES ('');
+SELECT * FROM table WHERE varchar_column IS NULL; -- Returns the row
+
+-- PostgreSQL behavior
+INSERT INTO table (varchar_column) VALUES ('');
+SELECT * FROM table WHERE varchar_column IS NULL; -- Returns nothing
+SELECT * FROM table WHERE varchar_column = ''; -- Returns the row
+```
+
+## Migration Actions
+
+### 1. Stored Procedures
+Update logic that assumes empty strings convert to `NULL`:
+
+```sql
+-- Preserve Oracle behavior (convert empty to NULL):
+column = NULLIF(param, '')
+
+-- Or accept PostgreSQL behavior (preserve empty string):
+column = param
+```
+
+### 2. Application Code
+Review code that checks for `NULL` and ensure it handles empty strings appropriately:
+
+```csharp
+// Before (Oracle-specific)
+if (value == null) { }
+
+// After (PostgreSQL-compatible)
+if (string.IsNullOrEmpty(value)) { }
+```
+
+### 3. Tests
+Update assertions to be compatible with both behaviors:
+
+```csharp
+// Migration-compatible test pattern
+var value = reader.IsDBNull(columnIndex) ? null : reader.GetString(columnIndex);
+Assert.IsTrue(string.IsNullOrEmpty(value));
+```
+
+### 4. Data Migration
+Decide whether to:
+- Convert existing `NULL` values to empty strings
+- Convert empty strings to `NULL` using `NULLIF(column, '')`
+- Leave values as-is and update application logic
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/no-data-found-exceptions.md b/skills/reviewing-oracle-to-postgres-migration/references/no-data-found-exceptions.md
new file mode 100644
index 00000000..a86bb823
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/no-data-found-exceptions.md
@@ -0,0 +1,99 @@
+# PostgreSQL Exception Handling: SELECT INTO No Data Found
+
+## Overview
+
+A common issue when migrating from Oracle to PostgreSQL involves `SELECT INTO` statements that expect to raise an exception when no rows are found. This pattern difference can cause integration tests to fail and application logic to behave incorrectly if not properly handled.
+
+---
+
+## Problem Description
+
+### Scenario
+
+A stored procedure performs a lookup operation using `SELECT INTO` to retrieve a required value:
+
+```sql
+SELECT column_name
+INTO variable_name
+FROM table1, table2
+WHERE table1.id = table2.id AND table1.id = parameter_value;
+```
+
+### Oracle Behavior
+
+When a `SELECT INTO` statement in Oracle does **not find any rows**, it automatically raises:
+
+```
+ORA-01403: no data found
+```
+
+This exception is caught by the procedure's exception handler and re-raised to the calling application.
+
+### PostgreSQL Behavior (Pre-Fix)
+
+When a `SELECT INTO` statement in PostgreSQL does **not find any rows**, it:
+
+- Sets the `FOUND` variable to `false`
+- **Silently continues** execution without raising an exception
+
+This fundamental difference can cause tests to fail silently and logic errors in production code.
+
+---
+
+## Root Cause Analysis
+
+The PostgreSQL version was missing explicit error handling for the `NOT FOUND` condition after the `SELECT INTO` statement.
+
+**Original Code (Problematic):**
+
+```plpgsql
+SELECT column_name
+INTO variable_name
+FROM table1, table2
+WHERE table1.id = table2.id AND table1.id = parameter_value;
+
+IF variable_name = 'X' THEN
+ result_variable := 1;
+ELSE
+ result_variable := 2;
+END IF;
+```
+
+**Problem:** No check for `NOT FOUND` condition. When an invalid parameter is passed, the SELECT returns no rows, `FOUND` becomes `false`, and execution continues with an uninitialized variable.
+
+---
+
+## Key Differences: Oracle vs PostgreSQL
+
+Add explicit `NOT FOUND` error handling to match Oracle behavior.
+
+**Fixed Code:**
+
+```plpgsql
+SELECT column_name
+INTO variable_name
+FROM table1, table2
+WHERE table1.id = table2.id AND table1.id = parameter_value;
+
+-- Explicitly raise exception if no data found (matching Oracle behavior)
+IF NOT FOUND THEN
+ RAISE EXCEPTION 'no data found';
+END IF;
+
+IF variable_name = 'X' THEN
+ result_variable := 1;
+ELSE
+ result_variable := 2;
+END IF;
+```
+
+---
+
+## Migration Notes for Similar Issues
+
+When fixing this issue, verify:
+
+1. **Success path tests** - Confirm valid parameters still work correctly
+2. **Exception tests** - Verify exceptions are raised with invalid parameters
+3. **Transaction rollback** - Ensure proper cleanup on errors
+4. **Data integrity** - Confirm all fields are populated correctly in success cases
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/oracle-parentheses-from-clause.md b/skills/reviewing-oracle-to-postgres-migration/references/oracle-parentheses-from-clause.md
new file mode 100644
index 00000000..b79e9232
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/oracle-parentheses-from-clause.md
@@ -0,0 +1,190 @@
+# Oracle to PostgreSQL: Parentheses in FROM Clause
+
+## Contents
+
+- Problem
+- Root Cause
+- Solution Pattern
+- Examples
+- Migration Checklist
+- Common Locations
+- Application Code Examples
+- Error Messages to Watch For
+- Testing Recommendations
+
+## Problem
+
+Oracle allows optional parentheses around table names in the FROM clause:
+
+```sql
+-- Oracle: Both are valid
+SELECT * FROM (TABLE_NAME) WHERE id = 1;
+SELECT * FROM TABLE_NAME WHERE id = 1;
+```
+
+PostgreSQL does **not** allow extra parentheses around a single table name in the FROM clause without it being a derived table or subquery. Attempting to use this pattern results in:
+
+```
+Npgsql.PostgresException: 42601: syntax error at or near ")"
+```
+
+## Root Cause
+
+- **Oracle**: Treats `FROM(TABLE_NAME)` as equivalent to `FROM TABLE_NAME`
+- **PostgreSQL**: Parentheses in the FROM clause are only valid for:
+ - Subqueries: `FROM (SELECT * FROM table)`
+ - Explicit table references that are part of join syntax
+ - Common Table Expressions (CTEs)
+ - Without a valid SELECT or join context, PostgreSQL raises a syntax error
+
+## Solution Pattern
+
+Remove the unnecessary parentheses around the table name:
+
+```sql
+-- Oracle (problematic in PostgreSQL)
+SELECT col1, col2
+FROM (TABLE_NAME)
+WHERE id = 1;
+
+-- PostgreSQL (correct)
+SELECT col1, col2
+FROM TABLE_NAME
+WHERE id = 1;
+```
+
+## Examples
+
+### Example 1: Simple Table Reference
+
+```sql
+-- Oracle
+SELECT employee_id, employee_name
+FROM (EMPLOYEES)
+WHERE department_id = 10;
+
+-- PostgreSQL (fixed)
+SELECT employee_id, employee_name
+FROM EMPLOYEES
+WHERE department_id = 10;
+```
+
+### Example 2: Join with Parentheses
+
+```sql
+-- Oracle (problematic)
+SELECT e.employee_id, d.department_name
+FROM (EMPLOYEES) e
+JOIN (DEPARTMENTS) d ON e.department_id = d.department_id;
+
+-- PostgreSQL (fixed)
+SELECT e.employee_id, d.department_name
+FROM EMPLOYEES e
+JOIN DEPARTMENTS d ON e.department_id = d.department_id;
+```
+
+### Example 3: Valid Subquery Parentheses (Works in Both)
+
+```sql
+-- Both Oracle and PostgreSQL
+SELECT *
+FROM (SELECT employee_id, employee_name FROM EMPLOYEES WHERE department_id = 10) sub;
+```
+
+## Migration Checklist
+
+When fixing this issue, verify:
+
+1. **Identify all problematic FROM clauses**:
+ - Search for `FROM (` pattern in SQL
+ - Verify the opening parenthesis is immediately after `FROM` followed by a table name
+ - Confirm it's **not** a subquery (no SELECT keyword inside)
+
+2. **Distinguish valid parentheses**:
+   - ✅ `FROM (SELECT ...)` - Valid subquery
+   - ⚠️ `FROM (table_name` followed by a join - Check if JOIN keyword follows
+   - ❌ `FROM (TABLE_NAME)` - Invalid, remove parentheses
+
+3. **Apply the fix**:
+ - Remove the parentheses around the table name
+ - Keep parentheses for legitimate subqueries
+
+4. **Test thoroughly**:
+ - Execute the query in PostgreSQL
+ - Verify result set matches original Oracle query
+ - Include in integration tests
+
+## Common Locations
+
+Search for `FROM (` in:
+
+- ✅ Stored procedures and functions (DDL scripts)
+- ✅ Application data access layers (DAL classes)
+- ✅ Dynamic SQL builders
+- ✅ Reporting queries
+- ✅ Views and materialized views
+- ✅ Complex queries with multiple joins
+
+## Application Code Examples
+
+### VB.NET
+
+```vb
+' Before (Oracle)
+StrSQL = "SELECT employee_id, NAME " _
+ & "FROM (EMPLOYEES) e " _
+ & "WHERE e.department_id = 10"
+
+' After (PostgreSQL)
+StrSQL = "SELECT employee_id, NAME " _
+ & "FROM EMPLOYEES e " _
+ & "WHERE e.department_id = 10"
+```
+
+### C#
+
+```csharp
+// Before (Oracle)
+var sql = "SELECT id, name FROM (USERS) WHERE status = @status";
+
+// After (PostgreSQL)
+var sql = "SELECT id, name FROM USERS WHERE status = @status";
+```
+
+## Error Messages to Watch For
+
+```
+Npgsql.PostgresException: 42601: syntax error at or near ")"
+ERROR: syntax error at or near ")"
+LINE 1: SELECT * FROM (TABLE_NAME) WHERE ...
+ ^
+```
+
+## Testing Recommendations
+
+1. **Syntax Verification**: Parse all migrated queries to ensure they run without syntax errors
+
+ ```csharp
+ [Fact]
+ public void GetEmployees_ExecutesWithoutSyntaxError()
+ {
+ // Should not throw PostgresException with error code 42601
+ var employees = dal.GetEmployees(departmentId: 10);
+ Assert.NotEmpty(employees);
+ }
+ ```
+
+2. **Result Comparison**: Verify that result sets are identical before and after migration
+3. **Regex-based Search**: Use pattern `FROM\s*\(\s*[A-Za-z_][A-Za-z0-9_]*\s*\)` to identify candidates
+
+## Related Files
+
+- Reference: [oracle-to-postgres-type-coercion.md](oracle-to-postgres-type-coercion.md) - Other syntax differences
+- PostgreSQL Documentation: [SELECT Statement](https://www.postgresql.org/docs/current/sql-select.html)
+
+## Migration Notes
+
+- This is a straightforward syntactic fix with no semantic implications
+- No data conversion required
+- Safe to apply automated find-and-replace, but manually verify complex queries
+- Update integration tests to exercise the migrated queries
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-sorting.md b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-sorting.md
new file mode 100644
index 00000000..d1622bc5
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-sorting.md
@@ -0,0 +1,51 @@
+# Oracle to PostgreSQL Sorting Migration Guide
+
+Purpose: Preserve Oracle-like sorting semantics when moving queries to PostgreSQL.
+
+## Key points
+- Oracle often treats plain `ORDER BY` as binary/byte-wise, giving case-insensitive ordering for ASCII.
+- PostgreSQL defaults differ; to match Oracle behavior, use `COLLATE "C"` on sort expressions.
+
+## 1) Standard `SELECT β¦ ORDER BY`
+**Goal:** Keep Oracle-style ordering.
+
+**Pattern:**
+```sql
+SELECT col1
+FROM your_table
+ORDER BY col1 COLLATE "C";
+```
+
+**Notes:**
+- Apply `COLLATE "C"` to each sort expression that must mimic Oracle.
+- Works with ascending/descending and multi-column sorts, e.g. `ORDER BY col1 COLLATE "C", col2 COLLATE "C" DESC`.
+
+## 2) `SELECT DISTINCT β¦ ORDER BY`
+**Issue:** PostgreSQL enforces that `ORDER BY` expressions appear in the `SELECT` list for `DISTINCT`, raising:
+`Npgsql.PostgresException: 42P10: for SELECT DISTINCT, ORDER BY expressions must appear in select list`
+
+**Oracle difference:** Oracle allowed ordering by expressions not projected when using `DISTINCT`.
+
+**Recommended pattern (wrap and sort):**
+```sql
+SELECT *
+FROM (
+ SELECT DISTINCT col1, col2
+ FROM your_table
+) AS distinct_results
+ORDER BY col2 COLLATE "C";
+```
+
+**Why:**
+- The inner query performs the `DISTINCT` projection.
+- The outer query safely orders the result set and adds `COLLATE "C"` to align with Oracle sorting.
+
+**Tips:**
+- Ensure any columns used in the outer `ORDER BY` are included in the inner projection.
+- For multi-column sorts, collate each relevant expression: `ORDER BY col2 COLLATE "C", col3 COLLATE "C" DESC`.
+
+## Validation checklist
+- [ ] Added `COLLATE "C"` to every `ORDER BY` that should follow Oracle sorting rules.
+- [ ] For `DISTINCT` queries, wrapped the projection and sorted in the outer query.
+- [ ] Confirmed ordered columns are present in the inner projection.
+- [ ] Re-ran tests or representative queries to verify ordering matches Oracle outputs.
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-timestamp-timezone.md b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-timestamp-timezone.md
new file mode 100644
index 00000000..1cd643b8
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-timestamp-timezone.md
@@ -0,0 +1,187 @@
+# Oracle to PostgreSQL: CURRENT_TIMESTAMP and NOW() Timezone Handling
+
+## Contents
+
+- Problem
+- Behavior Comparison
+- PostgreSQL Timezone Precedence
+- Common Error Symptoms
+- Migration Actions — Npgsql config, DateTime normalization, stored procedures, session timezone, application code
+- Integration Test Patterns
+- Checklist
+
+## Problem
+
+Oracle's `CURRENT_TIMESTAMP` returns a value in the **session timezone** and stores it in the column's declared precision. When .NET reads this value back via ODP.NET, it is surfaced as a `DateTime` with `Kind=Local`, reflecting the OS timezone of the client.
+
+PostgreSQL's `CURRENT_TIMESTAMP` and `NOW()` both return a `timestamptz` (timestamp with time zone) anchored to **UTC**, regardless of the session timezone setting. How Npgsql surfaces this value depends on the driver version and configuration:
+
+- **Npgsql < 6 / legacy mode (`EnableLegacyTimestampBehavior = true`):** `timestamptz` columns are returned as `DateTime` with `Kind=Unspecified`. This is the source of silent timezone bugs when migrating from Oracle.
+- **Npgsql 6+ with legacy mode disabled (the new default):** `timestamptz` columns are returned as `DateTime` with `Kind=Utc`, and writing a `Kind=Unspecified` value throws an exception at insertion time.
+
+Projects that have not yet upgraded to Npgsql 6+, or that explicitly opt back into legacy mode, remain vulnerable to the `Kind=Unspecified` issue. This mismatch — and the ease of accidentally re-enabling legacy mode — causes silent data corruption, incorrect comparisons, and off-by-N-hours bugs that are extremely difficult to trace.
+
+---
+
+## Behavior Comparison
+
+| Aspect | Oracle | PostgreSQL |
+|---|---|---|
+| `CURRENT_TIMESTAMP` type | `TIMESTAMP WITH LOCAL TIME ZONE` | `timestamptz` (UTC-normalised) |
+| Client `DateTime.Kind` via driver | `Local` | `Unspecified` (Npgsql < 6 / legacy mode); `Utc` (Npgsql 6+ default) |
+| Session timezone influence | Yes — affects stored/returned value | Affects *display* only; UTC stored internally |
+| NOW() equivalent | `SYSDATE` / `CURRENT_TIMESTAMP` | `NOW()` = `CURRENT_TIMESTAMP` (both return `timestamptz`) |
+| Implicit conversion on comparison | Oracle applies session TZ offset | PostgreSQL compares UTC; session TZ is display-only |
+
+---
+
+## PostgreSQL Timezone Precedence
+
+PostgreSQL resolves the effective session timezone using the following hierarchy (highest priority wins):
+
+| Level | How it is set |
+|---|---|
+| **Session** | `SET TimeZone = 'UTC'` sent at connection open |
+| **Role** | `ALTER ROLE app_user SET TimeZone = 'UTC'` |
+| **Database** | `ALTER DATABASE mydb SET TimeZone = 'UTC'` |
+| **Server** | `postgresql.conf` — `TimeZone = 'America/New_York'` |
+
+The session timezone does **not** affect the stored UTC value of a `timestamptz` column — it only controls how `SHOW timezone` and `::text` casts format a value for display. Application code that relies on `DateTime.Kind` or compares timestamps without an explicit timezone can produce incorrect results if the server's default timezone is not UTC.
+
+---
+
+## Common Error Symptoms
+
+- Timestamps read from PostgreSQL have `Kind=Unspecified`; comparisons with `DateTime.UtcNow` or `DateTime.Now` produce incorrect results.
+- Date-range queries return too few or too many rows because the WHERE clause comparison is evaluated in a timezone that differs from the stored UTC value.
+- Integration tests pass on a developer machine (UTC OS timezone) but fail in CI or production (non-UTC timezone).
+- Stored procedure output parameters carrying timestamps arrive with a session-offset applied by the server but are then compared to UTC values in the application.
+
+---
+
+## Migration Actions
+
+### 1. Configure Npgsql for UTC via Connection String or AppContext
+
+Npgsql 6+ ships with `EnableLegacyTimestampBehavior` set to `false` by default, which causes `timestamptz` values to be returned as `DateTime` with `Kind=Utc`. Explicitly setting the switch at startup is still recommended to guard against accidental opt-in to legacy mode (e.g., via a config file or a transitive dependency) and to make the intent visible to future maintainers:
+
+```csharp
+// Program.cs / Startup.cs — apply once at application start
+AppContext.SetSwitch("Npgsql.EnableLegacyTimestampBehavior", false);
+```
+
+With this switch disabled, Npgsql throws if you try to write a `DateTime` with `Kind=Unspecified` to a `timestamptz` column, making timezone bugs loud and detectable at insertion time rather than silently at query time.
+
+### 2. Normalise DateTime Values Before Persistence
+
+Replace any `DateTime.Now` with `DateTime.UtcNow` throughout the migrated codebase. For values that originate from external input (e.g., user-provided dates deserialized from JSON), ensure they are converted to UTC before being saved:
+
+```csharp
+// Before (Oracle-era code — relied on session/OS timezone)
+var timestamp = DateTime.Now;
+
+// After (PostgreSQL-compatible)
+var timestamp = DateTime.UtcNow;
+
+// For externally-supplied values
+var utcTimestamp = dateTimeInput.Kind == DateTimeKind.Utc
+ ? dateTimeInput
+ : dateTimeInput.ToUniversalTime();
+```
+
+### 3. Fix Stored Procedures Using CURRENT_TIMESTAMP / NOW()
+
+Stored procedures that assign `CURRENT_TIMESTAMP` or `NOW()` to a `timestamp without time zone` (`timestamp`) column must be reviewed. Prefer `timestamptz` columns or cast explicitly:
+
+```sql
+-- Ambiguous: server timezone influences interpretation
+INSERT INTO audit_log (created_at) VALUES (NOW()::timestamp);
+
+-- Safe: always UTC
+INSERT INTO audit_log (created_at) VALUES (NOW() AT TIME ZONE 'UTC');
+
+-- Or: use timestamptz column type and let PostgreSQL store UTC natively
+INSERT INTO audit_log (created_at) VALUES (CURRENT_TIMESTAMP);
+```
+
+### 4. Force Session Timezone on Connection Open (Defence-in-Depth)
+
+Regardless of role or database defaults, set the session timezone explicitly when opening a connection. This guarantees consistent behavior independent of server configuration:
+
+```csharp
+// Npgsql connection string approach
+var connString = "Host=localhost;Database=mydb;Username=app;Password=...;Timezone=UTC";
+
+// Or: apply via NpgsqlDataSourceBuilder
+var dataSource = new NpgsqlDataSourceBuilder(connString)
+ .Build();
+
+// Or: execute on every new connection
+await using var conn = new NpgsqlConnection(connString);
+await conn.OpenAsync();
+await using var cmd = new NpgsqlCommand("SET TimeZone = 'UTC'", conn);
+await cmd.ExecuteNonQueryAsync();
+```
+
+### 5. Application Code β Avoid DateTime.Kind=Unspecified
+
+Audit all repository and data-access code that reads timestamp columns. Where Npgsql returns `Unspecified`, either configure the data source globally (option 1 above) or wrap the read:
+
+```csharp
+// Safe reader helper — convert Unspecified to Utc at the boundary
+DateTime ReadUtcDateTime(NpgsqlDataReader reader, int ordinal)
+{
+ var dt = reader.GetDateTime(ordinal);
+ return dt.Kind == DateTimeKind.Unspecified
+ ? DateTime.SpecifyKind(dt, DateTimeKind.Utc)
+ : dt.ToUniversalTime();
+}
+```
+
+---
+
+## Integration Test Patterns
+
+### Test: Verify timestamps persist and return as UTC
+
+```csharp
+[Fact]
+public async Task InsertedTimestamp_ShouldRoundTripAsUtc()
+{
+ var before = DateTime.UtcNow;
+
+ await repository.InsertAuditEntryAsync(/* ... */);
+
+ var retrieved = await repository.GetLatestAuditEntryAsync();
+
+ Assert.Equal(DateTimeKind.Utc, retrieved.CreatedAt.Kind);
+ Assert.True(retrieved.CreatedAt >= before,
+ "Persisted CreatedAt should not be earlier than the pre-insert UTC timestamp.");
+}
+```
+
+### Test: Verify timestamp comparisons across Oracle and PostgreSQL baselines
+
+```csharp
+[Fact]
+public async Task TimestampComparison_ShouldReturnSameRowsAsOracle()
+{
+ var cutoff = DateTime.UtcNow.AddDays(-1);
+
+ var oracleResults = await oracleRepository.GetEntriesAfter(cutoff);
+ var postgresResults = await postgresRepository.GetEntriesAfter(cutoff);
+
+ Assert.Equal(oracleResults.Count, postgresResults.Count);
+}
+```
+
+---
+
+## Checklist
+
+- [ ] `AppContext.SetSwitch("Npgsql.EnableLegacyTimestampBehavior", false)` applied at application startup.
+- [ ] All `DateTime.Now` usages in data-access code replaced with `DateTime.UtcNow`.
+- [ ] Connection string or connection-open hook sets `Timezone=UTC` / `SET TimeZone = 'UTC'`.
+- [ ] Stored procedures that use `CURRENT_TIMESTAMP` or `NOW()` reviewed; `timestamp without time zone` columns explicitly cast or replaced with `timestamptz`.
+- [ ] Integration tests assert `DateTime.Kind == Utc` on retrieved timestamp values.
+- [ ] Tests cover date-range queries to confirm row counts match Oracle baseline.
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-to-char-numeric.md b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-to-char-numeric.md
new file mode 100644
index 00000000..1b90c221
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-to-char-numeric.md
@@ -0,0 +1,145 @@
+# Oracle to PostgreSQL: TO_CHAR() Numeric Conversions
+
+## Contents
+
+- Problem
+- Root Cause
+- Solution Patterns — CAST, format string, concatenation
+- Migration Checklist
+- Application Code Review
+- Testing Recommendations
+- Common Locations
+- Error Messages to Watch For
+
+## Problem
+
+Oracle allows `TO_CHAR()` to convert numeric types to strings without a format specifier:
+
+```sql
+-- Oracle: Works fine
+SELECT TO_CHAR(vessel_id) FROM vessels;
+SELECT TO_CHAR(fiscal_year) FROM certificates;
+```
+
+PostgreSQL requires a format string when using `TO_CHAR()` with numeric types, otherwise it raises:
+
+```
+42883: function to_char(numeric) does not exist
+```
+
+## Root Cause
+
+- **Oracle**: `TO_CHAR(number)` without a format mask implicitly converts the number to a string using default formatting
+- **PostgreSQL**: `TO_CHAR()` always requires an explicit format string for numeric types (e.g., `'999999'`, `'FM999999'`)
+
+## Solution Patterns
+
+### Pattern 1: Use CAST (Recommended)
+
+The cleanest migration approach is to replace `TO_CHAR(numeric_column)` with `CAST(numeric_column AS TEXT)`:
+
+```sql
+-- Oracle
+SELECT TO_CHAR(vessel_id) AS vessel_item FROM vessels;
+
+-- PostgreSQL (preferred)
+SELECT CAST(vessel_id AS TEXT) AS vessel_item FROM vessels;
+```
+
+**Advantages:**
+
+- More idiomatic in PostgreSQL
+- Clearer intent
+- No format string needed
+
+### Pattern 2: Provide Format String
+
+If you need specific numeric formatting, use an explicit format mask:
+
+```sql
+-- PostgreSQL with format
+SELECT TO_CHAR(vessel_id, 'FM999999') AS vessel_item FROM vessels;
+SELECT TO_CHAR(amount, 'FM999999.00') AS amount_text FROM payments;
+```
+
+**Format masks:**
+
+- `'FM999999'`: Fixed-width integer (FM = Fill Mode, removes leading spaces)
+- `'FM999999.00'`: Decimal with 2 places
+- `'999,999.00'`: With thousand separators
+
+### Pattern 3: String Concatenation
+
+For simple concatenation where numeric conversion is implicit:
+
+```sql
+-- Oracle
+WHERE TO_CHAR(fiscal_year) = '2024'
+
+-- PostgreSQL (using the :: cast shorthand)
+WHERE fiscal_year::TEXT = '2024'
+-- or
+WHERE CAST(fiscal_year AS TEXT) = '2024'
+```
+
+## Migration Checklist
+
+When migrating SQL containing `TO_CHAR()`:
+
+1. **Identify all TO_CHAR() calls**: Search for `TO_CHAR\(` in SQL strings, stored procedures, and application queries
+2. **Check the argument type**:
+ - **DATE/TIMESTAMP**: Keep `TO_CHAR()` with format string (e.g., `TO_CHAR(date_col, 'YYYY-MM-DD')`)
+ - **NUMERIC/INTEGER**: Replace with `CAST(... AS TEXT)` or add format string
+3. **Test the output**: Verify that the string representation matches expectations (no unexpected spaces, decimals, etc.)
+4. **Update comparison logic**: If comparing numeric-to-string, ensure consistent types on both sides
+
+## Application Code Review
+
+### C# Example
+
+```csharp
+// Before (Oracle)
+var sql = "SELECT TO_CHAR(id) AS id_text FROM entities WHERE TO_CHAR(status) = @status";
+
+// After (PostgreSQL)
+var sql = "SELECT CAST(id AS TEXT) AS id_text FROM entities WHERE CAST(status AS TEXT) = @status";
+```
+
+## Testing Recommendations
+
+1. **Unit Tests**: Verify numeric-to-string conversions return expected values
+
+ ```csharp
+ [Fact]
+ public void GetVesselNumbers_ReturnsVesselIdsAsStrings()
+ {
+ var results = dal.GetVesselNumbers(certificateType);
+ Assert.All(results, item => Assert.True(int.TryParse(item.DISPLAY_MEMBER, out _)));
+ }
+ ```
+
+2. **Integration Tests**: Ensure queries with `CAST()` execute without errors
+3. **Comparison Tests**: Verify WHERE clauses with numeric-to-string comparisons filter correctly
+
+## Common Locations
+
+Search for `TO_CHAR` in:
+
+- ✅ Stored procedures and functions (DDL scripts)
+- ✅ Application data access layers (DAL classes)
+- ✅ Dynamic SQL builders
+- ✅ Reporting queries
+- ✅ ORM/Entity Framework raw SQL
+
+## Error Messages to Watch For
+
+```
+Npgsql.PostgresException: 42883: function to_char(numeric) does not exist
+Npgsql.PostgresException: 42883: function to_char(integer) does not exist
+Npgsql.PostgresException: 42883: function to_char(bigint) does not exist
+```
+
+## See Also
+
+- [oracle-to-postgres-type-coercion.md](oracle-to-postgres-type-coercion.md) - Related type conversion issues
+- PostgreSQL Documentation: [Data Type Formatting Functions](https://www.postgresql.org/docs/current/functions-formatting.html)
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-type-coercion.md b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-type-coercion.md
new file mode 100644
index 00000000..60ce72fa
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/oracle-to-postgres-type-coercion.md
@@ -0,0 +1,182 @@
+# Oracle to PostgreSQL Type Coercion Issues
+
+## Contents
+
+- Overview
+- The Problem — symptom, root cause, example
+- The Solution — string literals, explicit casting
+- Common Comparison Operators Affected
+- Detection Strategy
+- Real-World Example
+- Prevention Best Practices
+
+## Overview
+
+This document describes a common migration issue encountered when porting SQL code from Oracle to PostgreSQL. The issue stems from fundamental differences in how these databases handle implicit type conversions in comparison operators.
+
+## The Problem
+
+### Symptom
+
+When migrating SQL queries from Oracle to PostgreSQL, you may encounter the following error:
+
+```
+Npgsql.PostgresException: 42883: operator does not exist: character varying <> integer
+POSITION: [line_number]
+```
+
+### Root Cause
+
+PostgreSQL has **strict type enforcement** and does not perform implicit type coercion in comparison operators. Oracle, by contrast, automatically converts operands to compatible types during comparison operations.
+
+#### Example Mismatch
+
+**Oracle SQL (works fine):**
+
+```sql
+AND physical_address.pcountry_cd <> 124
+```
+
+- `pcountry_cd` is a `VARCHAR2`
+- `124` is an integer literal
+- Oracle silently converts `124` to a string for comparison
+
+**PostgreSQL (fails):**
+
+```sql
+AND physical_address.pcountry_cd <> 124
+```
+
+```
+42883: operator does not exist: character varying <> integer
+```
+
+- `pcountry_cd` is a `character varying`
+- `124` is an integer literal
+- PostgreSQL rejects the comparison because the types don't match
+
+## The Solution
+
+### Approach 1: Use String Literals (Recommended)
+
+Convert integer literals to string literals:
+
+```sql
+AND physical_address.pcountry_cd <> '124'
+```
+
+**Pros:**
+
+- Semantically correct (country codes are typically stored as strings)
+- Most efficient
+- Clearest intent
+
+**Cons:**
+
+- None
+
+### Approach 2: Explicit Type Casting
+
+Explicitly cast the integer to a string type:
+
+```sql
+AND physical_address.pcountry_cd <> CAST(124 AS VARCHAR)
+```
+
+**Pros:**
+
+- Makes the conversion explicit and visible
+- Useful if the value is a parameter or complex expression
+
+**Cons:**
+
+- Slightly less efficient
+- More verbose
+
+## Common Comparison Operators Affected
+
+All comparison operators can trigger this issue:
+
+- `<>` (not equal)
+- `=` (equal)
+- `<` (less than)
+- `>` (greater than)
+- `<=` (less than or equal)
+- `>=` (greater than or equal)
+
+## Detection Strategy
+
+When migrating from Oracle to PostgreSQL:
+
+1. **Search for numeric literals in WHERE clauses** comparing against string/varchar columns
+2. **Look for patterns like:**
+ - `column_name <> 123` (where column is VARCHAR/CHAR)
+ - `column_name = 456` (where column is VARCHAR/CHAR)
+ - `column_name IN (1, 2, 3)` (where column is VARCHAR/CHAR)
+
+3. **Code review checklist:**
+ - Are all comparison values correctly typed?
+ - Do string columns always use string literals?
+ - Are numeric columns always compared against numeric values?
+
+## Real-World Example
+
+**Original Oracle Query:**
+
+```sql
+SELECT ac040.stakeholder_id,
+ ac006.organization_etxt
+ FROM ac040_stakeholder ac040
+ INNER JOIN ac006_organization ac006 ON ac040.stakeholder_id = ac006.organization_id
+ WHERE physical_address.pcountry_cd <> 124
+ AND LOWER(ac006.organization_etxt) LIKE '%' || @orgtxt || '%'
+ ORDER BY UPPER(ac006.organization_etxt)
+```
+
+**Fixed PostgreSQL Query:**
+
+```sql
+SELECT ac040.stakeholder_id,
+ ac006.organization_etxt
+ FROM ac040_stakeholder ac040
+ INNER JOIN ac006_organization ac006 ON ac040.stakeholder_id = ac006.organization_id
+ WHERE physical_address.pcountry_cd <> '124'
+ AND LOWER(ac006.organization_etxt) LIKE '%' || @orgtxt || '%'
+ ORDER BY UPPER(ac006.organization_etxt)
+```
+
+**Change:** `124` → `'124'`
+
+## Prevention Best Practices
+
+1. **Use Type-Consistent Literals:**
+ - For string columns: Always use string literals (`'value'`)
+ - For numeric columns: Always use numeric literals (`123`)
+ - For dates: Always use date literals (`DATE '2024-01-01'`)
+
+2. **Leverage Database Tools:**
+ - Use your IDE's SQL linter to catch type mismatches
+ - Run PostgreSQL syntax validation during code review
+
+3. **Test Early:**
+ - Execute migration queries against PostgreSQL before deployment
+ - Include integration tests that exercise all comparison operators
+
+4. **Documentation:**
+ - Document any type coercions in comments
+ - Mark migrated code with revision history
+
+## References
+
+- [PostgreSQL Type Casting Documentation](https://www.postgresql.org/docs/current/sql-syntax.html)
+- [Oracle Type Conversion Documentation](https://docs.oracle.com/database/121/SQLRF/sql_elements003.htm)
+- [Npgsql Exception: Operator Does Not Exist](https://www.npgsql.org/doc/api/NpgsqlException.html)
+
+## Related Issues
+
+This issue is part of broader Oracle → PostgreSQL migration challenges:
+
+- Implicit function conversions (e.g., `TO_CHAR`, `TO_DATE`)
+- String concatenation operator differences (`||` works in both, but behavior differs)
+- Numeric precision and rounding differences
+- NULL handling in comparisons
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/postgres-concurrent-transactions.md b/skills/reviewing-oracle-to-postgres-migration/references/postgres-concurrent-transactions.md
new file mode 100644
index 00000000..3d5e212d
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/postgres-concurrent-transactions.md
@@ -0,0 +1,259 @@
+# Oracle to PostgreSQL: Concurrent Transaction Handling
+
+## Contents
+
+- Overview
+- The Core Difference
+- Common Error Symptoms
+- Problem Scenarios
+- Solutions — materialize results, separate connections, single query
+- Detection Strategy
+- Error Messages to Watch For
+- Comparison Table
+- Best Practices
+- Migration Checklist
+
+## Overview
+
+When migrating from Oracle to PostgreSQL, a critical difference exists in how **concurrent operations on a single database connection** are handled. Oracle's ODP.NET driver allows multiple active commands and result sets on the same connection simultaneously, while PostgreSQL's Npgsql driver enforces a strict **one active command per connection** rule. Code that worked seamlessly in Oracle will throw runtime exceptions in PostgreSQL if concurrent operations share a connection.
+
+## The Core Difference
+
+**Oracle Behavior:**
+
+- A single connection can have multiple active commands executing concurrently
+- Opening a second `DataReader` while another is still open is permitted
+- Nested or overlapping database calls on the same connection work transparently
+
+**PostgreSQL Behavior:**
+
+- A connection supports only **one active command at a time**
+- Attempting to execute a second command while a `DataReader` is open throws an exception
+- Lazy-loaded navigation properties or callback-driven reads that trigger additional queries on the same connection will fail
+
+## Common Error Symptoms
+
+When migrating Oracle code without accounting for this difference:
+
+```
+System.InvalidOperationException: An operation is already in progress.
+```
+
+```
+Npgsql.NpgsqlOperationInProgressException: A command is already in progress: <command text>
+```
+
+These occur when application code attempts to execute a new command on a connection that already has an active `DataReader` or uncommitted command in flight.
+
+---
+
+## Problem Scenarios
+
+### Scenario 1: Iterating a DataReader While Executing Another Command
+
+```csharp
+using (var reader = command1.ExecuteReader())
+{
+ while (reader.Read())
+ {
+ // PROBLEM: executing a second command on the same connection
+ // while the reader is still open
+ using (var command2 = new NpgsqlCommand("SELECT ...", connection))
+ {
+ var value = command2.ExecuteScalar(); // FAILS
+ }
+ }
+}
+```
+
+### Scenario 2: Lazy Loading / Deferred Execution in Data Access Layers
+
+```csharp
+// Oracle: works because ODP.NET supports concurrent readers
+var items = repository.GetItems(); // returns IEnumerable backed by open DataReader
+foreach (var item in items)
+{
+ // PROBLEM: triggers a second query on the same connection
+ var details = repository.GetDetails(item.Id); // FAILS on PostgreSQL
+}
+```
+
+### Scenario 3: Nested Stored Procedure Calls via Application Code
+
+```csharp
+// Oracle: ODP.NET handles multiple active commands
+command1.ExecuteNonQuery(); // starts a long-running operation
+command2.ExecuteScalar(); // FAILS on PostgreSQL — command1 still in progress
+```
+
+---
+
+## Solutions
+
+### Solution 1: Materialize Results Before Issuing New Commands (Recommended)
+
+Close the first result set by loading it into memory before executing subsequent commands on the same connection.
+
+```csharp
+// Load all results into a list first
+var items = new List<Item>();
+using (var reader = command1.ExecuteReader())
+{
+ while (reader.Read())
+ {
+ items.Add(MapItem(reader));
+ }
+} // reader is closed and disposed here
+
+// Now safe to execute another command on the same connection
+foreach (var item in items)
+{
+ using (var command2 = new NpgsqlCommand("SELECT ...", connection))
+ {
+ command2.Parameters.AddWithValue("id", item.Id);
+ var value = command2.ExecuteScalar(); // Works
+ }
+}
+```
+
+For LINQ / EF Core scenarios, force materialization with `.ToList()`:
+
+```csharp
+// Before (fails on PostgreSQL — deferred execution keeps connection busy)
+var items = dbContext.Items.Where(i => i.Active);
+foreach (var item in items)
+{
+ var details = dbContext.Details.FirstOrDefault(d => d.ItemId == item.Id);
+}
+
+// After (materializes first query before issuing second)
+var items = dbContext.Items.Where(i => i.Active).ToList();
+foreach (var item in items)
+{
+ var details = dbContext.Details.FirstOrDefault(d => d.ItemId == item.Id);
+}
+```
+
+### Solution 2: Use Separate Connections for Concurrent Operations
+
+When operations genuinely need to run concurrently, open a dedicated connection for each.
+
+```csharp
+using (var reader = command1.ExecuteReader())
+{
+ while (reader.Read())
+ {
+ // Use a separate connection for the nested query
+ using (var connection2 = new NpgsqlConnection(connectionString))
+ {
+ connection2.Open();
+ using (var command2 = new NpgsqlCommand("SELECT ...", connection2))
+ {
+                var value = command2.ExecuteScalar(); // Works — different connection
+ }
+ }
+ }
+}
+```
+
+### Solution 3: Restructure to a Single Query
+
+Where possible, combine nested lookups into a single query using JOINs or subqueries to eliminate the need for concurrent commands entirely.
+
+```csharp
+// Before: two sequential queries on the same connection
+var order = GetOrder(orderId); // query 1
+var details = GetOrderDetails(orderId); // query 2 (fails if query 1 reader still open)
+
+// After: single query with JOIN
+using (var command = new NpgsqlCommand(
+ "SELECT o.*, d.* FROM orders o JOIN order_details d ON o.id = d.order_id WHERE o.id = @id",
+ connection))
+{
+ command.Parameters.AddWithValue("id", orderId);
+ using (var reader = command.ExecuteReader())
+ {
+ // Process combined result set
+ }
+}
+```
+
+---
+
+## Detection Strategy
+
+### Code Review Checklist
+
+- [ ] Search for methods that open a `DataReader` and call other database methods before closing it
+- [ ] Look for `IEnumerable` return types from data access methods that defer execution (indicate open readers)
+- [ ] Identify EF Core queries without `.ToList()` / `.ToArray()` that are iterated while issuing further queries
+- [ ] Check for nested stored procedure calls in application code that share a connection
+
+### Common Locations to Search
+
+- Data access layers and repository classes
+- Service methods that orchestrate multiple repository calls
+- Code paths that iterate query results and perform lookups per row
+- Event handlers or callbacks triggered during data iteration
+
+### Search Patterns
+
+```regex
+ExecuteReader\(.*\)[\s\S]*?Execute(Scalar|NonQuery|Reader)\(
+```
+
+```regex
+\.Where\(.*\)[\s\S]*?foreach[\s\S]*?dbContext\.
+```
+
+---
+
+## Error Messages to Watch For
+
+| Error Message | Likely Cause |
+|---------------|--------------|
+| `An operation is already in progress` | Second command executed while a `DataReader` is open on the same connection |
+| `A command is already in progress: <command text>` | Npgsql detected overlapping command execution on a single connection |
+| `The connection is already in state 'Executing'` | Connection state conflict from concurrent usage |
+
+---
+
+## Comparison Table: Oracle vs. PostgreSQL
+
+| Aspect | Oracle (ODP.NET) | PostgreSQL (Npgsql) |
+|--------|------------------|---------------------|
+| **Concurrent commands** | Multiple active commands per connection | One active command per connection |
+| **Multiple open DataReaders** | Supported | Not supported — must close/materialize first |
+| **Nested DB calls during iteration** | Transparent | Throws `InvalidOperationException` |
+| **Deferred execution safety** | Safe to iterate and query | Must materialize (`.ToList()`) before issuing new queries |
+| **Connection pooling impact** | Lower connection demand | May need more pooled connections if using Solution 2 |
+
+---
+
+## Best Practices
+
+1. **Materialize early** — Call `.ToList()` or `.ToArray()` on query results before iterating and issuing further database calls. This is the simplest and most reliable fix.
+
+2. **Audit data access patterns** — Review all repository and data access methods for deferred-execution return types (`IEnumerable`, `IQueryable`) that callers iterate while issuing additional queries.
+
+3. **Prefer single queries** — Where feasible, combine nested lookups into JOINs or subqueries to eliminate the concurrent-command pattern entirely.
+
+4. **Isolate connections when necessary** — If concurrent operations are genuinely required, use separate connections rather than attempting to share one.
+
+5. **Test iterative workflows** — Integration tests should cover scenarios where code iterates result sets and performs additional database operations per row, as these are the most common failure points.
+
+## Migration Checklist
+
+- [ ] Identify all code paths that execute multiple commands on a single connection concurrently
+- [ ] Locate `IEnumerable`-backed data access methods that defer execution with open readers
+- [ ] Add `.ToList()` / `.ToArray()` materialization where deferred results are iterated alongside further queries
+- [ ] Refactor nested database calls to use separate connections or combined queries where appropriate
+- [ ] Verify EF Core navigation properties and lazy loading do not trigger concurrent connection usage
+- [ ] Update integration tests to cover iterative data access patterns
+- [ ] Load-test connection pool sizing if Solution 2 (separate connections) is used extensively
+
+## References
+
+- [Npgsql Documentation: Basic Usage](https://www.npgsql.org/doc/basic-usage.html)
+- [PostgreSQL Documentation: Concurrency Control](https://www.postgresql.org/docs/current/mvcc.html)
+- [Npgsql GitHub: Multiple Active Result Sets Discussion](https://github.com/npgsql/npgsql/issues/462)
diff --git a/skills/reviewing-oracle-to-postgres-migration/references/postgres-refcursor-handling.md b/skills/reviewing-oracle-to-postgres-migration/references/postgres-refcursor-handling.md
new file mode 100644
index 00000000..e94a7b98
--- /dev/null
+++ b/skills/reviewing-oracle-to-postgres-migration/references/postgres-refcursor-handling.md
@@ -0,0 +1,148 @@
+# Oracle to PostgreSQL: Refcursor Handling in Client Applications
+
+## The Core Difference
+
+Oracle's driver automatically unwraps `SYS_REFCURSOR` output parameters, exposing the result set directly in the data reader. PostgreSQL's Npgsql driver instead returns a **cursor name** (e.g., `"<unnamed portal 1>"`). The client must issue a separate `FETCH ALL FROM "<cursor name>"` command to retrieve actual rows.
+
+Failing to account for this causes:
+
+```
+System.IndexOutOfRangeException: Field not found in row: <column_name>
+```
+
+The reader contains only the cursor-name parameter — not the expected result columns.
+
+> **Transaction requirement:** PostgreSQL refcursors are scoped to a transaction. Both the procedure call and the `FETCH` must execute within the same explicit transaction, or the cursor may be closed before the fetch completes under autocommit.
+
+## Solution: Explicit Refcursor Unwrapping (C#)
+
+```csharp
+public IEnumerable<User> GetUsers(int departmentId)
+{
+    var users = new List<User>();
+ using var connection = new NpgsqlConnection(connectionString);
+ connection.Open();
+
+    // Refcursors are transaction-scoped — wrap both the call and FETCH in one transaction.
+ using var tx = connection.BeginTransaction();
+
+ using var command = new NpgsqlCommand("get_users", connection, tx)
+ {
+ CommandType = CommandType.StoredProcedure
+ };
+ command.Parameters.AddWithValue("p_department_id", departmentId);
+ var refcursorParam = new NpgsqlParameter("cur_result", NpgsqlDbType.Refcursor)
+ {
+ Direction = ParameterDirection.Output
+ };
+ command.Parameters.Add(refcursorParam);
+
+ // Execute the procedure to open the cursor.
+ command.ExecuteNonQuery();
+
+ // Retrieve the cursor name, then fetch the actual data.
+ string cursorName = (string)refcursorParam.Value;
+ using var fetchCommand = new NpgsqlCommand($"FETCH ALL FROM \"{cursorName}\"", connection, tx);
+ using var reader = fetchCommand.ExecuteReader();
+ while (reader.Read())
+ {
+ users.Add(new User
+ {
+ UserId = reader.GetInt32(reader.GetOrdinal("user_id")),
+ UserName = reader.GetString(reader.GetOrdinal("user_name")),
+ Email = reader.GetString(reader.GetOrdinal("email"))
+ });
+ }
+
+ tx.Commit();
+ return users;
+}
+```
+
+## Reusable Helper
+
+Returning a live `NpgsqlDataReader` from a helper leaves the underlying `NpgsqlCommand` undisposed and creates ambiguous ownership. Prefer materializing results inside the helper instead:
+
+```csharp
+public static class PostgresHelpers
+{
+    public static List<T> ExecuteRefcursorProcedure<T>(
+        NpgsqlConnection connection,
+        NpgsqlTransaction transaction,
+        string procedureName,
+        Dictionary<string, object> parameters,
+        string refcursorParameterName,
+        Func<NpgsqlDataReader, T> map)
+ {
+ using var command = new NpgsqlCommand(procedureName, connection, transaction)
+ {
+ CommandType = CommandType.StoredProcedure
+ };
+ foreach (var (key, value) in parameters)
+ command.Parameters.AddWithValue(key, value);
+
+ var refcursorParam = new NpgsqlParameter(refcursorParameterName, NpgsqlDbType.Refcursor)
+ {
+ Direction = ParameterDirection.Output
+ };
+ command.Parameters.Add(refcursorParam);
+ command.ExecuteNonQuery();
+
+ string cursorName = (string)refcursorParam.Value;
+ if (string.IsNullOrEmpty(cursorName))
+            return new List<T>();
+
+ // fetchCommand is disposed here; results are fully materialized before returning.
+ using var fetchCommand = new NpgsqlCommand($"FETCH ALL FROM \"{cursorName}\"", connection, transaction);
+ using var reader = fetchCommand.ExecuteReader();
+
+        var results = new List<T>();
+ while (reader.Read())
+ results.Add(map(reader));
+ return results;
+ }
+}
+
+// Usage:
+using var connection = new NpgsqlConnection(connectionString);
+connection.Open();
+using var tx = connection.BeginTransaction();
+
+var users = PostgresHelpers.ExecuteRefcursorProcedure(
+ connection, tx,
+ "get_users",
+    new Dictionary<string, object> { { "p_department_id", departmentId } },
+ "cur_result",
+ r => new User
+ {
+ UserId = r.GetInt32(r.GetOrdinal("user_id")),
+ UserName = r.GetString(r.GetOrdinal("user_name")),
+ Email = r.GetString(r.GetOrdinal("email"))
+ });
+
+tx.Commit();
+```
+
+## Oracle vs. PostgreSQL Summary
+
+| Aspect | Oracle (ODP.NET) | PostgreSQL (Npgsql) |
+|--------|------------------|---------------------|
+| **Cursor return** | Result set exposed directly in data reader | Cursor name string in output parameter |
+| **Data access** | `ExecuteReader()` returns rows immediately | `ExecuteNonQuery()` → get cursor name → `FETCH ALL FROM` |
+| **Transaction** | Transparent | CALL and FETCH must share the same transaction |
+| **Multiple cursors** | Automatic | Each requires a separate `FETCH` command |
+| **Resource lifetime** | Driver-managed | Cursor is open until fetched or transaction ends |
+
+## Migration Checklist
+
+- [ ] Identify all procedures returning `SYS_REFCURSOR` (Oracle) / `refcursor` (PostgreSQL)
+- [ ] Replace `ExecuteReader()` with `ExecuteNonQuery()` → cursor name → `FETCH ALL FROM`
+- [ ] Wrap each call-and-fetch pair in an explicit transaction
+- [ ] Ensure commands and readers are disposed (prefer materializing results inside a helper)
+- [ ] Update unit and integration tests
+
+## References
+
+- [PostgreSQL Documentation: Cursors](https://www.postgresql.org/docs/current/plpgsql-cursors.html)
+- [PostgreSQL FETCH Command](https://www.postgresql.org/docs/current/sql-fetch.html)
+- [Npgsql Refcursor Support](https://github.com/npgsql/npgsql/issues/1887)
diff --git a/skills/scaffolding-oracle-to-postgres-migration-test-project/SKILL.md b/skills/scaffolding-oracle-to-postgres-migration-test-project/SKILL.md
new file mode 100644
index 00000000..efcb33d6
--- /dev/null
+++ b/skills/scaffolding-oracle-to-postgres-migration-test-project/SKILL.md
@@ -0,0 +1,54 @@
+---
+name: scaffolding-oracle-to-postgres-migration-test-project
+description: 'Scaffolds an xUnit integration test project for validating Oracle-to-PostgreSQL database migration behavior in .NET solutions. Creates the test project, transaction-rollback base class, and seed data manager. Use when setting up test infrastructure before writing migration integration tests, or when a test project is needed for Oracle-to-PostgreSQL validation.'
+---
+
+# Scaffolding an Integration Test Project for Oracle-to-PostgreSQL Migration
+
+Creates a compilable, empty xUnit test project with transaction management and seed data infrastructure for a single target project. Run once per project before writing tests.
+
+## Workflow
+
+```
+Progress:
+- [ ] Step 1: Inspect the target project
+- [ ] Step 2: Create the xUnit test project
+- [ ] Step 3: Implement transaction-rollback base class
+- [ ] Step 4: Implement seed data manager
+- [ ] Step 5: Verify the project compiles
+```
+
+**Step 1: Inspect the target project**
+
+Read the target project's `.csproj` to determine the .NET version and existing package references. Match these versions exactly — do not upgrade.
+
+**Step 2: Create the xUnit test project**
+
+- Target the same .NET version as the application under test.
+- Add NuGet packages for Oracle database connectivity and xUnit.
+- Add a project reference to the target project only — no other application projects.
+- Add an `appsettings.json` configured for Oracle database connectivity.
+
+**Step 3: Implement transaction-rollback base class**
+
+- Create a base test class that opens a transaction before each test and rolls it back after.
+- Catch and handle all exceptions to guarantee rollback.
+- Make the pattern inheritable by all downstream test classes.
+
+**Step 4: Implement seed data manager**
+
+- Create a global seed manager for loading test data within the transaction scope.
+- Do not commit seed data — transactions roll back after each test.
+- Do not use `TRUNCATE TABLE` — preserve existing database data.
+- Reuse existing seed files if available.
+- Establish a naming convention for seed file location that downstream test creation will follow.
+
+**Step 5: Verify the project compiles**
+
+Build the test project and confirm it compiles with zero errors before finishing.
+
+## Key Constraints
+
+- Oracle is the golden behavior source — scaffold for Oracle first.
+- Keep to existing .NET and C# versions; do not introduce newer language or runtime features.
+- Output is an empty test project with infrastructure only — no test cases.
diff --git a/skills/suggest-awesome-github-copilot-prompts/SKILL.md b/skills/suggest-awesome-github-copilot-prompts/SKILL.md
deleted file mode 100644
index efe487c8..00000000
--- a/skills/suggest-awesome-github-copilot-prompts/SKILL.md
+++ /dev/null
@@ -1,106 +0,0 @@
----
-name: suggest-awesome-github-copilot-prompts
-description: 'Suggest relevant GitHub Copilot prompt files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing prompts in this repository, and identifying outdated prompts that need updates.'
----
-
-# Suggest Awesome GitHub Copilot Prompts
-
-Analyze current repository context and suggest relevant prompt files from the [GitHub awesome-copilot repository](https://github.com/github/awesome-copilot/blob/main/docs/README.prompts.md) that are not already available in this repository.
-
-## Process
-
-1. **Fetch Available Prompts**: Extract prompt list and descriptions from [awesome-copilot README.prompts.md](https://github.com/github/awesome-copilot/blob/main/docs/README.prompts.md). Must use `#fetch` tool.
-2. **Scan Local Prompts**: Discover existing prompt files in `.github/prompts/` folder
-3. **Extract Descriptions**: Read front matter from local prompt files to get descriptions
-4. **Fetch Remote Versions**: For each local prompt, fetch the corresponding version from awesome-copilot repository using raw GitHub URLs (e.g., `https://raw.githubusercontent.com/github/awesome-copilot/main/prompts/`)
-5. **Compare Versions**: Compare local prompt content with remote versions to identify:
- - Prompts that are up-to-date (exact match)
- - Prompts that are outdated (content differs)
- - Key differences in outdated prompts (tools, description, content)
-6. **Analyze Context**: Review chat history, repository files, and current project needs
-7. **Compare Existing**: Check against prompts already available in this repository
-8. **Match Relevance**: Compare available prompts against identified patterns and requirements
-9. **Present Options**: Display relevant prompts with descriptions, rationale, and availability status including outdated prompts
-10. **Validate**: Ensure suggested prompts would add value not already covered by existing prompts
-11. **Output**: Provide structured table with suggestions, descriptions, and links to both awesome-copilot prompts and similar local prompts
- **AWAIT** user request to proceed with installation or updates of specific prompts. DO NOT INSTALL OR UPDATE UNLESS DIRECTED TO DO SO.
-12. **Download/Update Assets**: For requested prompts, automatically:
- - Download new prompts to `.github/prompts/` folder
- - Update outdated prompts by replacing with latest version from awesome-copilot
- - Do NOT adjust content of the files
- - Use `#fetch` tool to download assets, but may use `curl` using `#runInTerminal` tool to ensure all content is retrieved
- - Use `#todos` tool to track progress
-
-## Context Analysis Criteria
-
-π **Repository Patterns**:
-- Programming languages used (.cs, .js, .py, etc.)
-- Framework indicators (ASP.NET, React, Azure, etc.)
-- Project types (web apps, APIs, libraries, tools)
-- Documentation needs (README, specs, ADRs)
-
-π¨οΈ **Chat History Context**:
-- Recent discussions and pain points
-- Feature requests or implementation needs
-- Code review patterns
-- Development workflow requirements
-
-## Output Format
-
-Display analysis results in structured table comparing awesome-copilot prompts with existing repository prompts:
-
-| Awesome-Copilot Prompt | Description | Already Installed | Similar Local Prompt | Suggestion Rationale |
-|-------------------------|-------------|-------------------|---------------------|---------------------|
-| [code-review.prompt.md](https://github.com/github/awesome-copilot/blob/main/prompts/code-review.prompt.md) | Automated code review prompts | β No | None | Would enhance development workflow with standardized code review processes |
-| [documentation.prompt.md](https://github.com/github/awesome-copilot/blob/main/prompts/documentation.prompt.md) | Generate project documentation | β Yes | create_oo_component_documentation.prompt.md | Already covered by existing documentation prompts |
-| [debugging.prompt.md](https://github.com/github/awesome-copilot/blob/main/prompts/debugging.prompt.md) | Debug assistance prompts | β οΈ Outdated | debugging.prompt.md | Tools configuration differs: remote uses `'codebase'` vs local missing - Update recommended |
-
-## Local Prompts Discovery Process
-
-1. List all `*.prompt.md` files in `.github/prompts/` directory
-2. For each discovered file, read front matter to extract `description`
-3. Build comprehensive inventory of existing prompts
-4. Use this inventory to avoid suggesting duplicates
-
-## Version Comparison Process
-
-1. For each local prompt file, construct the raw GitHub URL to fetch the remote version:
- - Pattern: `https://raw.githubusercontent.com/github/awesome-copilot/main/prompts/`
-2. Fetch the remote version using the `#fetch` tool
-3. Compare entire file content (including front matter and body)
-4. Identify specific differences:
- - **Front matter changes** (description, tools, mode)
- - **Tools array modifications** (added, removed, or renamed tools)
- - **Content updates** (instructions, examples, guidelines)
-5. Document key differences for outdated prompts
-6. Calculate similarity to determine if update is needed
-
-## Requirements
-
-- Use `githubRepo` tool to get content from awesome-copilot repository prompts folder
-- Scan local file system for existing prompts in `.github/prompts/` directory
-- Read YAML front matter from local prompt files to extract descriptions
-- Compare local prompts with remote versions to detect outdated prompts
-- Compare against existing prompts in this repository to avoid duplicates
-- Focus on gaps in current prompt library coverage
-- Validate that suggested prompts align with repository's purpose and standards
-- Provide clear rationale for each suggestion
-- Include links to both awesome-copilot prompts and similar local prompts
-- Clearly identify outdated prompts with specific differences noted
-- Don't provide any additional information or context beyond the table and the analysis
-
-
-## Icons Reference
-
-- β Already installed and up-to-date
-- β οΈ Installed but outdated (update available)
-- β Not installed in repo
-
-## Update Handling
-
-When outdated prompts are identified:
-1. Include them in the output table with β οΈ status
-2. Document specific differences in the "Suggestion Rationale" column
-3. Provide recommendation to update with key changes noted
-4. When user requests update, replace entire local file with remote version
-5. Preserve file location in `.github/prompts/` directory
diff --git a/website/astro.config.mjs b/website/astro.config.mjs
index 76a8d50a..663a7d32 100644
--- a/website/astro.config.mjs
+++ b/website/astro.config.mjs
@@ -5,8 +5,8 @@ import pagefindResources from "./src/integrations/pagefind-resources";
// https://astro.build/config
export default defineConfig({
- site: "https://github.github.com/",
- base: "/awesome-copilot/",
+ site: "https://awesome-copilot.github.com/",
+ base: "/",
output: "static",
integrations: [
starlight({
@@ -35,6 +35,7 @@ export default defineConfig({
{ label: "Workflows", link: "/workflows/" },
{ label: "Plugins", link: "/plugins/" },
{ label: "Tools", link: "/tools/" },
+ { label: "Contributors", link: "/contributors/" },
],
},
{
@@ -75,6 +76,7 @@ export default defineConfig({
tableOfContents: { minHeadingLevel: 2, maxHeadingLevel: 3 },
components: {
Head: "./src/components/Head.astro",
+ Footer: "./src/components/Footer.astro",
},
}),
sitemap(),
diff --git a/website/data/tools.yml b/website/data/tools.yml
index 0f7fa18f..831ab07d 100644
--- a/website/data/tools.yml
+++ b/website/data/tools.yml
@@ -259,3 +259,51 @@ tools:
- copilot
- agent
- manager
+
+ - id: groundhog-day
+ name: Groundhog Day
+ description: >-
+ Autonomous backup agent for GitHub Copilot CLI skills. Watches your
+ ~/.copilot/skills/ directory in real time, commits every change with
+ meaningful messages, and pushes to GitHub automatically. Starts on boot
+ via macOS LaunchAgent, restarts if it crashes, and requires zero
+ interaction after setup. One-line installer creates a backup repo,
+ seeds it with existing skills, and starts the watcher. Includes a daily
+ health check, sync locking, push retry, and empty-source protection.
+ category: CLI Tools
+ featured: false
+ requirements:
+ - macOS or Linux
+ - GitHub CLI (gh) authenticated
+ - git
+ - fswatch (installed automatically via Homebrew on macOS)
+ links:
+ github: https://github.com/DUBSOpenHub/groundhog-day
+ features:
+ - "π Real-Time Watch: Detects every create, edit, rename, and delete in your skills directory using fswatch"
+ - "π Auto Sync: Commits and pushes changes to GitHub with meaningful commit messages"
+ - "π₯ Daily Health Check: Automated 6 AM checkup validates watcher, repo state, remote sync, and log health"
+ - "β‘ One-Line Install: curl installer creates a backup repo, seeds existing skills, and starts the watcher"
+ - "π‘οΈ Built-In Safety: Sync locking, push retry, empty-source guard, and graceful shutdown"
+ - "π Survives Reboots: macOS LaunchAgent keeps it alive across restarts"
+ configuration:
+ type: bash
+ content: |
+ # Install in one line
+ curl -fsSL https://raw.githubusercontent.com/DUBSOpenHub/groundhog-day/main/install.sh | bash
+
+ # Or install with Homebrew
+ brew install DUBSOpenHub/tap/groundhog-day
+
+ # Check status
+ groundhog status
+
+ # Run a manual health check
+ groundhog checkup
+ tags:
+ - cli
+ - backup
+ - skills
+ - automation
+ - macos
+ - launchagent
diff --git a/website/package-lock.json b/website/package-lock.json
index 50914c9a..ac39caf7 100644
--- a/website/package-lock.json
+++ b/website/package-lock.json
@@ -13,7 +13,11 @@
"@astrojs/starlight": "^0.37.6",
"astro": "^5.16.15",
"choices.js": "^11.1.0",
- "jszip": "^3.10.1"
+ "front-matter": "^4.0.2",
+ "gray-matter": "^4.0.3",
+ "jszip": "^3.10.1",
+ "marked": "^17.0.4",
+ "shiki": "^3.23.0"
}
},
"node_modules/@astrojs/compiler": {
@@ -1664,60 +1668,60 @@
]
},
"node_modules/@shikijs/core": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.21.0.tgz",
- "integrity": "sha512-AXSQu/2n1UIQekY8euBJlvFYZIw0PHY63jUzGbrOma4wPxzznJXTXkri+QcHeBNaFxiiOljKxxJkVSoB3PjbyA==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.23.0.tgz",
+ "integrity": "sha512-NSWQz0riNb67xthdm5br6lAkvpDJRTgB36fxlo37ZzM2yq0PQFFzbd8psqC2XMPgCzo1fW6cVi18+ArJ44wqgA==",
"license": "MIT",
"dependencies": {
- "@shikijs/types": "3.21.0",
+ "@shikijs/types": "3.23.0",
"@shikijs/vscode-textmate": "^10.0.2",
"@types/hast": "^3.0.4",
"hast-util-to-html": "^9.0.5"
}
},
"node_modules/@shikijs/engine-javascript": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.21.0.tgz",
- "integrity": "sha512-ATwv86xlbmfD9n9gKRiwuPpWgPENAWCLwYCGz9ugTJlsO2kOzhOkvoyV/UD+tJ0uT7YRyD530x6ugNSffmvIiQ==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.23.0.tgz",
+ "integrity": "sha512-aHt9eiGFobmWR5uqJUViySI1bHMqrAgamWE1TYSUoftkAeCCAiGawPMwM+VCadylQtF4V3VNOZ5LmfItH5f3yA==",
"license": "MIT",
"dependencies": {
- "@shikijs/types": "3.21.0",
+ "@shikijs/types": "3.23.0",
"@shikijs/vscode-textmate": "^10.0.2",
"oniguruma-to-es": "^4.3.4"
}
},
"node_modules/@shikijs/engine-oniguruma": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.21.0.tgz",
- "integrity": "sha512-OYknTCct6qiwpQDqDdf3iedRdzj6hFlOPv5hMvI+hkWfCKs5mlJ4TXziBG9nyabLwGulrUjHiCq3xCspSzErYQ==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.23.0.tgz",
+ "integrity": "sha512-1nWINwKXxKKLqPibT5f4pAFLej9oZzQTsby8942OTlsJzOBZ0MWKiwzMsd+jhzu8YPCHAswGnnN1YtQfirL35g==",
"license": "MIT",
"dependencies": {
- "@shikijs/types": "3.21.0",
+ "@shikijs/types": "3.23.0",
"@shikijs/vscode-textmate": "^10.0.2"
}
},
"node_modules/@shikijs/langs": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.21.0.tgz",
- "integrity": "sha512-g6mn5m+Y6GBJ4wxmBYqalK9Sp0CFkUqfNzUy2pJglUginz6ZpWbaWjDB4fbQ/8SHzFjYbtU6Ddlp1pc+PPNDVA==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.23.0.tgz",
+ "integrity": "sha512-2Ep4W3Re5aB1/62RSYQInK9mM3HsLeB91cHqznAJMuylqjzNVAVCMnNWRHFtcNHXsoNRayP9z1qj4Sq3nMqYXg==",
"license": "MIT",
"dependencies": {
- "@shikijs/types": "3.21.0"
+ "@shikijs/types": "3.23.0"
}
},
"node_modules/@shikijs/themes": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.21.0.tgz",
- "integrity": "sha512-BAE4cr9EDiZyYzwIHEk7JTBJ9CzlPuM4PchfcA5ao1dWXb25nv6hYsoDiBq2aZK9E3dlt3WB78uI96UESD+8Mw==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.23.0.tgz",
+ "integrity": "sha512-5qySYa1ZgAT18HR/ypENL9cUSGOeI2x+4IvYJu4JgVJdizn6kG4ia5Q1jDEOi7gTbN4RbuYtmHh0W3eccOrjMA==",
"license": "MIT",
"dependencies": {
- "@shikijs/types": "3.21.0"
+ "@shikijs/types": "3.23.0"
}
},
"node_modules/@shikijs/types": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.21.0.tgz",
- "integrity": "sha512-zGrWOxZ0/+0ovPY7PvBU2gIS9tmhSUUt30jAcNV0Bq0gb2S98gwfjIs1vxlmH5zM7/4YxLamT6ChlqqAJmPPjA==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.23.0.tgz",
+ "integrity": "sha512-3JZ5HXOZfYjsYSk0yPwBrkupyYSLpAE26Qc0HLghhZNGTZg/SKxXIIgoxOpmmeQP0RRSDJTk1/vPfw9tbw+jSQ==",
"license": "MIT",
"dependencies": {
"@shikijs/vscode-textmate": "^10.0.2",
@@ -2775,6 +2779,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
"node_modules/estree-util-attach-comments": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz",
@@ -2890,6 +2907,18 @@
"integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
"license": "MIT"
},
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
+ "license": "MIT",
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/fdir": {
"version": "6.5.0",
"resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz",
@@ -2937,6 +2966,37 @@
"node": ">=20"
}
},
+ "node_modules/front-matter": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/front-matter/-/front-matter-4.0.2.tgz",
+ "integrity": "sha512-I8ZuJ/qG92NWX8i5x1Y8qyj3vizhXS31OxjKDu3LKP+7/qBgfIKValiZIEwoVoJKUHlhWtYrktkxV1XsX+pPlg==",
+ "license": "MIT",
+ "dependencies": {
+ "js-yaml": "^3.13.1"
+ }
+ },
+ "node_modules/front-matter/node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "license": "MIT",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/front-matter/node_modules/js-yaml": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
@@ -2978,6 +3038,43 @@
"integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==",
"license": "ISC"
},
+ "node_modules/gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "license": "MIT",
+ "dependencies": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/gray-matter/node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "license": "MIT",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/gray-matter/node_modules/js-yaml": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
"node_modules/h3": {
"version": "1.15.5",
"resolved": "https://registry.npmjs.org/h3/-/h3-1.15.5.tgz",
@@ -3501,6 +3598,15 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
@@ -3595,6 +3701,15 @@
"setimmediate": "^1.0.5"
}
},
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/kleur": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
@@ -3683,6 +3798,18 @@
"url": "https://github.com/sponsors/wooorm"
}
},
+ "node_modules/marked": {
+ "version": "17.0.4",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-17.0.4.tgz",
+ "integrity": "sha512-NOmVMM+KAokHMvjWmC5N/ZOvgmSWuqJB8FoYI019j4ogb/PeRMKoKIjReZ2w3376kkA8dSJIP8uD993Kxc0iRQ==",
+ "license": "MIT",
+ "bin": {
+ "marked": "bin/marked.js"
+ },
+ "engines": {
+ "node": ">= 20"
+ }
+ },
"node_modules/mdast-util-definitions": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-6.0.0.tgz",
@@ -5580,6 +5707,19 @@
"node": ">=11.0.0"
}
},
+ "node_modules/section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "license": "MIT",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
"node_modules/semver": {
"version": "7.7.3",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
@@ -5644,17 +5784,17 @@
}
},
"node_modules/shiki": {
- "version": "3.21.0",
- "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.21.0.tgz",
- "integrity": "sha512-N65B/3bqL/TI2crrXr+4UivctrAGEjmsib5rPMMPpFp1xAx/w03v8WZ9RDDFYteXoEgY7qZ4HGgl5KBIu1153w==",
+ "version": "3.23.0",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.23.0.tgz",
+ "integrity": "sha512-55Dj73uq9ZXL5zyeRPzHQsK7Nbyt6Y10k5s7OjuFZGMhpp4r/rsLBH0o/0fstIzX1Lep9VxefWljK/SKCzygIA==",
"license": "MIT",
"dependencies": {
- "@shikijs/core": "3.21.0",
- "@shikijs/engine-javascript": "3.21.0",
- "@shikijs/engine-oniguruma": "3.21.0",
- "@shikijs/langs": "3.21.0",
- "@shikijs/themes": "3.21.0",
- "@shikijs/types": "3.21.0",
+ "@shikijs/core": "3.23.0",
+ "@shikijs/engine-javascript": "3.23.0",
+ "@shikijs/engine-oniguruma": "3.23.0",
+ "@shikijs/langs": "3.23.0",
+ "@shikijs/themes": "3.23.0",
+ "@shikijs/types": "3.23.0",
"@shikijs/vscode-textmate": "^10.0.2",
"@types/hast": "^3.0.4"
}
@@ -5730,6 +5870,12 @@
"url": "https://github.com/sponsors/wooorm"
}
},
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
+ "license": "BSD-3-Clause"
+ },
"node_modules/stream-replace-string": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/stream-replace-string/-/stream-replace-string-2.0.0.tgz",
@@ -5791,6 +5937,15 @@
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
+ "node_modules/strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/style-to-js": {
"version": "1.1.21",
"resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
diff --git a/website/package.json b/website/package.json
index dc789707..8f652655 100644
--- a/website/package.json
+++ b/website/package.json
@@ -22,6 +22,10 @@
"@astrojs/starlight": "^0.37.6",
"astro": "^5.16.15",
"choices.js": "^11.1.0",
- "jszip": "^3.10.1"
+ "front-matter": "^4.0.2",
+ "gray-matter": "^4.0.3",
+ "jszip": "^3.10.1",
+ "marked": "^17.0.4",
+ "shiki": "^3.23.0"
}
}
diff --git a/website/src/components/EmbeddedPageData.astro b/website/src/components/EmbeddedPageData.astro
new file mode 100644
index 00000000..61a03455
--- /dev/null
+++ b/website/src/components/EmbeddedPageData.astro
@@ -0,0 +1,19 @@
+---
+import {
+ getEmbeddedDataElementId,
+ serializeEmbeddedData,
+} from "../scripts/embedded-data";
+
+interface Props {
+ filename: string;
+ data: unknown;
+}
+
+const { filename, data } = Astro.props;
+---
+
+
diff --git a/website/src/components/Footer.astro b/website/src/components/Footer.astro
new file mode 100644
index 00000000..9b857bf0
--- /dev/null
+++ b/website/src/components/Footer.astro
@@ -0,0 +1,84 @@
+---
+import EditLink from "@astrojs/starlight/components/EditLink.astro";
+import LastUpdated from "@astrojs/starlight/components/LastUpdated.astro";
+import Pagination from "@astrojs/starlight/components/Pagination.astro";
+import config from "virtual:starlight/user-config";
+import { Icon } from "@astrojs/starlight/components";
+---
+
+
+
+
diff --git a/website/src/components/Head.astro b/website/src/components/Head.astro
index bf25df34..73dc360d 100644
--- a/website/src/components/Head.astro
+++ b/website/src/components/Head.astro
@@ -9,3 +9,16 @@ const basePath = import.meta.env.BASE_URL;
document.body.dataset.basePath = basePath;
});
+{import.meta.env.PROD && (
+ <>
+
+
+ >
+)}
diff --git a/website/src/components/Modal.astro b/website/src/components/Modal.astro
index 40b283dc..59c401bd 100644
--- a/website/src/components/Modal.astro
+++ b/website/src/components/Modal.astro
@@ -2,57 +2,220 @@
// Modal component for viewing file contents
---
-
diff --git a/website/src/content/docs/learning-hub/index.md b/website/src/content/docs/learning-hub/index.md
index 538e51ec..b0eff9e9 100644
--- a/website/src/content/docs/learning-hub/index.md
+++ b/website/src/content/docs/learning-hub/index.md
@@ -1,32 +1,23 @@
---
title: Learning Hub
description: 'Curated articles, walkthroughs, and reference material to help you unlock everything you can do with GitHub Copilot'
-template: splash
-hero:
- tagline: 'Curated articles, walkthroughs, and reference material to help you unlock everything you can do with GitHub Copilot'
- actions:
- - text: Start with Fundamentals
- link: /learning-hub/what-are-agents-skills-instructions/
- icon: right-arrow
-sidebar:
- hidden: true
tableOfContents: false
---
## Fundamentals
Essential concepts to tailor GitHub Copilot beyond its default experience. Start with
-[What are Agents, Skills, and Instructions](/learning-hub/what-are-agents-skills-instructions/)
+[What are Agents, Skills, and Instructions](what-are-agents-skills-instructions/)
and work through the full track to master every customization primitive.
## Reference
Quick-lookup resources to keep handy while you work. Browse the
-[GitHub Copilot Terminology Glossary](/learning-hub/github-copilot-terminology-glossary/)
+[GitHub Copilot Terminology Glossary](github-copilot-terminology-glossary/)
for definitions of common terms and concepts.
## Hands-on
Interactive samples and recipes to learn by doing. Jump into the
-[Cookbook](/learning-hub/cookbook/) for code samples, recipes,
+[Cookbook](cookbook/) for code samples, recipes,
and examples you can use right away.
diff --git a/website/src/content/learning-hub/installing-and-using-plugins.md b/website/src/content/learning-hub/installing-and-using-plugins.md
index 94eb44d5..28bc8795 100644
--- a/website/src/content/learning-hub/installing-and-using-plugins.md
+++ b/website/src/content/learning-hub/installing-and-using-plugins.md
@@ -94,12 +94,12 @@ Plugins are especially valuable when you want to:
## Finding Plugins
-Plugins are collected in **marketplaces** β registries you can browse and install from. Copilot CLI comes with two marketplaces registered by default:
+Plugins are collected in **marketplaces** β registries you can browse and install from. Both Copilot CLI and VS Code come with two marketplaces registered by default β **no setup required**:
- **`copilot-plugins`** β Official GitHub Copilot plugins
- **`awesome-copilot`** β Community-contributed plugins from this repository
-### Browsing a Marketplace
+### Browsing in Copilot CLI
List your registered marketplaces:
@@ -121,6 +121,13 @@ Or from within an interactive Copilot session:
> **Tip**: You can also browse plugins on this site's [Plugins Directory](../../plugins/) to see descriptions, included agents, and skills before installing.
+### Browsing in VS Code
+
+Because `awesome-copilot` is a default marketplace in VS Code, you can discover plugins without any configuration:
+
+- Open the **Extensions** search view and type **`@agentPlugins`** to see all available plugins
+- Or open the **Command Palette** (`Ctrl+Shift+P` / `Cmd+Shift+P`) and run **Chat: Plugins**
+
### Adding More Marketplaces
Register additional marketplaces from GitHub repositories:
@@ -137,9 +144,9 @@ copilot plugin marketplace add /path/to/local-marketplace
## Installing Plugins
-### From a Registered Marketplace
+### From Copilot CLI
-The most common way to install a plugin β reference it by name and marketplace:
+Reference a plugin by name and marketplace:
```bash
copilot plugin install database-data-management@awesome-copilot
@@ -151,6 +158,10 @@ Or from an interactive session:
/plugin install database-data-management@awesome-copilot
```
+### From VS Code
+
+Browse to the plugin via `@agentPlugins` in the Extensions search view or via **Chat: Plugins** in the Command Palette, then click **Install**.
+
## Managing Plugins
Once installed, plugins are managed with a few simple commands:
diff --git a/website/src/integrations/pagefind-resources.ts b/website/src/integrations/pagefind-resources.ts
index 1a7efab5..95a56335 100644
--- a/website/src/integrations/pagefind-resources.ts
+++ b/website/src/integrations/pagefind-resources.ts
@@ -86,7 +86,7 @@ export default function pagefindResources(): AstroIntegration {
records = [];
}
- // Use the base path from Astro config (e.g. "/awesome-copilot/")
+ // Use the base path from Astro config (e.g. "/")
const base = siteBase.endsWith("/") ? siteBase : `${siteBase}/`;
let added = 0;
diff --git a/website/src/pages/agents.astro b/website/src/pages/agents.astro
index d55a0788..f1ca6f42 100644
--- a/website/src/pages/agents.astro
+++ b/website/src/pages/agents.astro
@@ -1,8 +1,13 @@
---
import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro';
+import agentsData from '../../public/data/agents.json';
import Modal from '../components/Modal.astro';
import ContributeCTA from '../components/ContributeCTA.astro';
+import EmbeddedPageData from '../components/EmbeddedPageData.astro';
import PageHeader from '../components/PageHeader.astro';
+import { renderAgentsHtml, sortAgents } from '../scripts/pages/agents-render';
+
+const initialItems = sortAgents(agentsData.items, 'title');
---
@@ -11,47 +16,48 @@ import PageHeader from '../components/PageHeader.astro';
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Loading agents...
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
{initialItems.length} of {initialItems.length} agents
+
+
diff --git a/website/src/pages/index.astro b/website/src/pages/index.astro
index 7ed485a9..9b6e37a9 100644
--- a/website/src/pages/index.astro
+++ b/website/src/pages/index.astro
@@ -92,39 +92,6 @@ const base = import.meta.env.BASE_URL;
-
-
-
-
Featured Plugins
-
-
-
-
-
-
-
-
-
-
Getting Started
-
-
-
1
-
Browse
-
Explore agents, instructions, skills, and plugins
-
-
-
2
-
Preview
-
Click any item to view its full content
-
-
-
3
-
Install
-
One-click install to VS Code or copy to clipboard
-
-
-
-
diff --git a/website/src/pages/instructions.astro b/website/src/pages/instructions.astro
index 5595c023..3819e4a4 100644
--- a/website/src/pages/instructions.astro
+++ b/website/src/pages/instructions.astro
@@ -1,8 +1,13 @@
---
import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro';
+import instructionsData from '../../public/data/instructions.json';
import Modal from '../components/Modal.astro';
import ContributeCTA from '../components/ContributeCTA.astro';
+import EmbeddedPageData from '../components/EmbeddedPageData.astro';
import PageHeader from '../components/PageHeader.astro';
+import { renderInstructionsHtml, sortInstructions } from '../scripts/pages/instructions-render';
+
+const initialItems = sortInstructions(instructionsData.items, 'title');
---
@@ -11,36 +16,37 @@ import PageHeader from '../components/PageHeader.astro';
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Loading instructions...
-
+
{initialItems.length} of {initialItems.length} instructions
+
+
+
+
diff --git a/website/src/pages/skills.astro b/website/src/pages/skills.astro
index 6ca0d001..f08a66c4 100644
--- a/website/src/pages/skills.astro
+++ b/website/src/pages/skills.astro
@@ -3,6 +3,11 @@ import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro';
import Modal from '../components/Modal.astro';
import ContributeCTA from '../components/ContributeCTA.astro';
import PageHeader from '../components/PageHeader.astro';
+import EmbeddedPageData from '../components/EmbeddedPageData.astro';
+import skillsData from '../../public/data/skills.json';
+import { renderSkillsHtml, sortSkills } from '../scripts/pages/skills-render';
+
+const initialItems = sortSkills(skillsData.items, 'title');
---
@@ -11,36 +16,36 @@ import PageHeader from '../components/PageHeader.astro';
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Loading skills...
-
+
{initialItems.length} of {initialItems.length} skills
+
@@ -48,6 +53,7 @@ import PageHeader from '../components/PageHeader.astro';
+
diff --git a/website/src/pages/tools.astro b/website/src/pages/tools.astro
index f011bb6c..22ed6602 100644
--- a/website/src/pages/tools.astro
+++ b/website/src/pages/tools.astro
@@ -1,7 +1,15 @@
---
import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro';
+import toolsData from '../../public/data/tools.json';
import ContributeCTA from "../components/ContributeCTA.astro";
+import EmbeddedPageData from "../components/EmbeddedPageData.astro";
import PageHeader from "../components/PageHeader.astro";
+import { renderToolsHtml } from "../scripts/pages/tools-render";
+
+const initialItems = toolsData.items.map((item) => ({
+ ...item,
+ title: item.name,
+}));
---
@@ -24,15 +32,18 @@ import PageHeader from "../components/PageHeader.astro";
-
+
{initialItems.length} of {initialItems.length} tools
-
+
More Tools Coming Soon
@@ -46,6 +57,8 @@ import PageHeader from "../components/PageHeader.astro";
+
+
diff --git a/website/src/pages/workflows.astro b/website/src/pages/workflows.astro
index dd4f4663..cdf22cbd 100644
--- a/website/src/pages/workflows.astro
+++ b/website/src/pages/workflows.astro
@@ -1,8 +1,13 @@
---
import StarlightPage from '@astrojs/starlight/components/StarlightPage.astro';
+import workflowsData from '../../public/data/workflows.json';
import Modal from '../components/Modal.astro';
import ContributeCTA from '../components/ContributeCTA.astro';
+import EmbeddedPageData from '../components/EmbeddedPageData.astro';
import PageHeader from '../components/PageHeader.astro';
+import { renderWorkflowsHtml, sortWorkflows } from '../scripts/pages/workflows-render';
+
+const initialItems = sortWorkflows(workflowsData.items, 'title');
---
@@ -13,36 +18,37 @@ import PageHeader from '../components/PageHeader.astro';
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Loading workflows...
-
+
{initialItems.length} of {initialItems.length} workflows