From 9ef6a28a3cfbc1c96a5e8e3bb440979a2862ad3d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Thu, 19 Feb 2026 04:36:30 +0000
Subject: [PATCH] Remove prompts infrastructure from build scripts
Co-authored-by: aaronpowell <434140+aaronpowell@users.noreply.github.com>
---
docs/README.plugins.md | 4 +-
docs/README.prompts.md | 158 ---
docs/README.skills.md | 142 +++
eng/constants.mjs | 24 +-
eng/materialize-plugins.mjs | 29 +-
eng/update-readme.mjs | 67 --
eng/validate-plugins.mjs | 1 -
prompts/add-educational-comments.prompt.md | 129 --
...prompt-engineering-safety-review.prompt.md | 230 ----
prompts/apple-appstore-reviewer.prompt.md | 307 -----
prompts/arch-linux-triage.prompt.md | 33 -
...architecture-blueprint-generator.prompt.md | 322 -----
prompts/aspnet-minimal-api-openapi.prompt.md | 42 -
prompts/az-cost-optimize.prompt.md | 305 -----
.../azure-resource-health-diagnose.prompt.md | 290 -----
prompts/boost-prompt.prompt.md | 25 -
prompts/breakdown-epic-arch.prompt.md | 66 --
prompts/breakdown-epic-pm.prompt.md | 58 -
...breakdown-feature-implementation.prompt.md | 128 --
prompts/breakdown-feature-prd.prompt.md | 61 -
prompts/breakdown-plan.prompt.md | 509 --------
prompts/breakdown-test.prompt.md | 365 ------
prompts/centos-linux-triage.prompt.md | 33 -
...de-exemplars-blueprint-generator.prompt.md | 126 --
...comment-code-generate-a-tutorial.prompt.md | 26 -
.../containerize-aspnet-framework.prompt.md | 455 -------
prompts/containerize-aspnetcore.prompt.md | 393 -------
prompts/context-map.prompt.md | 53 -
prompts/conventional-commit.prompt.md | 72 --
prompts/convert-plaintext-to-md.prompt.md | 363 ------
...instructions-blueprint-generator.prompt.md | 294 -----
prompts/cosmosdb-datamodeling.prompt.md | 1045 -----------------
prompts/create-agentsmd.prompt.md | 249 ----
...te-architectural-decision-record.prompt.md | 97 --
...ub-action-workflow-specification.prompt.md | 276 -----
...issue-feature-from-specification.prompt.md | 28 -
...feature-from-implementation-plan.prompt.md | 28 -
...unmet-specification-requirements.prompt.md | 35 -
...-pull-request-from-specification.prompt.md | 24 -
prompts/create-implementation-plan.prompt.md | 157 ---
prompts/create-llms.prompt.md | 210 ----
...reate-oo-component-documentation.prompt.md | 193 ---
prompts/create-readme.prompt.md | 21 -
prompts/create-specification.prompt.md | 127 --
.../create-spring-boot-java-project.prompt.md | 163 ---
...reate-spring-boot-kotlin-project.prompt.md | 147 ---
prompts/create-technical-spike.prompt.md | 231 ----
prompts/create-tldr-page.prompt.md | 211 ----
prompts/csharp-async.prompt.md | 50 -
prompts/csharp-docs.prompt.md | 63 -
prompts/csharp-mcp-server-generator.prompt.md | 59 -
prompts/csharp-mstest.prompt.md | 479 --------
prompts/csharp-nunit.prompt.md | 72 --
prompts/csharp-tunit.prompt.md | 101 --
prompts/csharp-xunit.prompt.md | 69 --
...taverse-python-advanced-patterns.prompt.md | 16 -
...dataverse-python-production-code.prompt.md | 116 --
prompts/dataverse-python-quickstart.prompt.md | 13 -
...dataverse-python-usecase-builder.prompt.md | 246 ----
prompts/debian-linux-triage.prompt.md | 33 -
prompts/declarative-agents.prompt.md | 93 --
prompts/devops-rollout-plan.prompt.md | 118 --
prompts/documentation-writer.prompt.md | 46 -
prompts/dotnet-best-practices.prompt.md | 84 --
.../dotnet-design-pattern-review.prompt.md | 41 -
prompts/dotnet-upgrade.prompt.md | 115 --
prompts/editorconfig.prompt.md | 64 -
prompts/ef-core.prompt.md | 76 --
prompts/fedora-linux-triage.prompt.md | 33 -
prompts/finalize-agent-prompt.prompt.md | 27 -
prompts/first-ask.prompt.md | 29 -
...er-structure-blueprint-generator.prompt.md | 405 -------
prompts/gen-specs-as-issues.prompt.md | 165 ---
...ustom-instructions-from-codebase.prompt.md | 240 ----
prompts/git-flow-branch-creator.prompt.md | 293 -----
prompts/github-copilot-starter.prompt.md | 372 ------
prompts/go-mcp-server-generator.prompt.md | 334 ------
...add-graalvm-native-image-support.prompt.md | 456 -------
prompts/java-docs.prompt.md | 24 -
prompts/java-junit.prompt.md | 64 -
prompts/java-mcp-server-generator.prompt.md | 756 ------------
.../java-refactoring-extract-method.prompt.md | 105 --
...ava-refactoring-remove-parameter.prompt.md | 85 --
prompts/java-springboot.prompt.md | 66 --
prompts/javascript-typescript-jest.prompt.md | 44 -
prompts/kotlin-mcp-server-generator.prompt.md | 449 -------
prompts/kotlin-springboot.prompt.md | 71 --
...-copilot-studio-server-generator.prompt.md | 118 --
prompts/mcp-create-adaptive-cards.prompt.md | 527 ---------
.../mcp-create-declarative-agent.prompt.md | 310 -----
prompts/mcp-deploy-manage-agents.prompt.md | 336 ------
prompts/memory-merger.prompt.md | 107 --
prompts/mkdocs-translations.prompt.md | 110 --
prompts/model-recommendation.prompt.md | 677 -----------
prompts/multi-stage-dockerfile.prompt.md | 47 -
prompts/my-issues.prompt.md | 9 -
prompts/my-pull-requests.prompt.md | 15 -
prompts/next-intl-add-language.prompt.md | 20 -
prompts/openapi-to-application-code.prompt.md | 114 --
prompts/php-mcp-server-generator.prompt.md | 522 --------
...aywright-automation-fill-in-form.prompt.md | 30 -
prompts/playwright-explore-website.prompt.md | 19 -
prompts/playwright-generate-test.prompt.md | 19 -
prompts/postgresql-code-review.prompt.md | 214 ----
prompts/postgresql-optimization.prompt.md | 406 -------
.../power-apps-code-app-scaffold.prompt.md | 150 ---
prompts/power-bi-dax-optimization.prompt.md | 175 ---
.../power-bi-model-design-review.prompt.md | 405 -------
...r-bi-performance-troubleshooting.prompt.md | 384 ------
...er-bi-report-design-consultation.prompt.md | 353 ------
...wer-platform-mcp-connector-suite.prompt.md | 156 ---
...low-analysis-blueprint-generator.prompt.md | 294 -----
prompts/prompt-builder.prompt.md | 142 ---
prompts/pytest-coverage.prompt.md | 28 -
prompts/python-mcp-server-generator.prompt.md | 105 --
prompts/readme-blueprint-generator.prompt.md | 79 --
...efactor-method-complexity-reduce.prompt.md | 102 --
prompts/refactor-plan.prompt.md | 66 --
...remember-interactive-programming.prompt.md | 13 -
prompts/remember.prompt.md | 125 --
prompts/repo-story-time.prompt.md | 156 ---
prompts/review-and-refactor.prompt.md | 15 -
prompts/ruby-mcp-server-generator.prompt.md | 660 -----------
prompts/rust-mcp-server-generator.prompt.md | 578 ---------
prompts/shuffle-json-data.prompt.md | 151 ---
prompts/sql-code-review.prompt.md | 303 -----
prompts/sql-optimization.prompt.md | 298 -----
.../structured-autonomy-generate.prompt.md | 127 --
.../structured-autonomy-implement.prompt.md | 21 -
prompts/structured-autonomy-plan.prompt.md | 83 --
...st-awesome-github-copilot-agents.prompt.md | 107 --
...some-github-copilot-instructions.prompt.md | 122 --
...t-awesome-github-copilot-prompts.prompt.md | 106 --
...st-awesome-github-copilot-skills.prompt.md | 130 --
prompts/swift-mcp-server-generator.prompt.md | 669 -----------
...nology-stack-blueprint-generator.prompt.md | 242 ----
prompts/tldr-prompt.prompt.md | 306 -----
.../typescript-mcp-server-generator.prompt.md | 90 --
prompts/typespec-api-operations.prompt.md | 421 -------
prompts/typespec-create-agent.prompt.md | 94 --
prompts/typespec-create-api-plugin.prompt.md | 167 ---
prompts/update-avm-modules-in-bicep.prompt.md | 60 -
prompts/update-implementation-plan.prompt.md | 157 ---
prompts/update-llms.prompt.md | 216 ----
prompts/update-markdown-file-index.prompt.md | 76 --
...pdate-oo-component-documentation.prompt.md | 162 ---
prompts/update-specification.prompt.md | 127 --
prompts/what-context-needed.prompt.md | 40 -
...write-coding-standards-from-file.prompt.md | 317 -----
149 files changed, 148 insertions(+), 26354 deletions(-)
delete mode 100644 docs/README.prompts.md
delete mode 100644 prompts/add-educational-comments.prompt.md
delete mode 100644 prompts/ai-prompt-engineering-safety-review.prompt.md
delete mode 100644 prompts/apple-appstore-reviewer.prompt.md
delete mode 100644 prompts/arch-linux-triage.prompt.md
delete mode 100644 prompts/architecture-blueprint-generator.prompt.md
delete mode 100644 prompts/aspnet-minimal-api-openapi.prompt.md
delete mode 100644 prompts/az-cost-optimize.prompt.md
delete mode 100644 prompts/azure-resource-health-diagnose.prompt.md
delete mode 100644 prompts/boost-prompt.prompt.md
delete mode 100644 prompts/breakdown-epic-arch.prompt.md
delete mode 100644 prompts/breakdown-epic-pm.prompt.md
delete mode 100644 prompts/breakdown-feature-implementation.prompt.md
delete mode 100644 prompts/breakdown-feature-prd.prompt.md
delete mode 100644 prompts/breakdown-plan.prompt.md
delete mode 100644 prompts/breakdown-test.prompt.md
delete mode 100644 prompts/centos-linux-triage.prompt.md
delete mode 100644 prompts/code-exemplars-blueprint-generator.prompt.md
delete mode 100644 prompts/comment-code-generate-a-tutorial.prompt.md
delete mode 100644 prompts/containerize-aspnet-framework.prompt.md
delete mode 100644 prompts/containerize-aspnetcore.prompt.md
delete mode 100644 prompts/context-map.prompt.md
delete mode 100644 prompts/conventional-commit.prompt.md
delete mode 100644 prompts/convert-plaintext-to-md.prompt.md
delete mode 100644 prompts/copilot-instructions-blueprint-generator.prompt.md
delete mode 100644 prompts/cosmosdb-datamodeling.prompt.md
delete mode 100644 prompts/create-agentsmd.prompt.md
delete mode 100644 prompts/create-architectural-decision-record.prompt.md
delete mode 100644 prompts/create-github-action-workflow-specification.prompt.md
delete mode 100644 prompts/create-github-issue-feature-from-specification.prompt.md
delete mode 100644 prompts/create-github-issues-feature-from-implementation-plan.prompt.md
delete mode 100644 prompts/create-github-issues-for-unmet-specification-requirements.prompt.md
delete mode 100644 prompts/create-github-pull-request-from-specification.prompt.md
delete mode 100644 prompts/create-implementation-plan.prompt.md
delete mode 100644 prompts/create-llms.prompt.md
delete mode 100644 prompts/create-oo-component-documentation.prompt.md
delete mode 100644 prompts/create-readme.prompt.md
delete mode 100644 prompts/create-specification.prompt.md
delete mode 100644 prompts/create-spring-boot-java-project.prompt.md
delete mode 100644 prompts/create-spring-boot-kotlin-project.prompt.md
delete mode 100644 prompts/create-technical-spike.prompt.md
delete mode 100644 prompts/create-tldr-page.prompt.md
delete mode 100644 prompts/csharp-async.prompt.md
delete mode 100644 prompts/csharp-docs.prompt.md
delete mode 100644 prompts/csharp-mcp-server-generator.prompt.md
delete mode 100644 prompts/csharp-mstest.prompt.md
delete mode 100644 prompts/csharp-nunit.prompt.md
delete mode 100644 prompts/csharp-tunit.prompt.md
delete mode 100644 prompts/csharp-xunit.prompt.md
delete mode 100644 prompts/dataverse-python-advanced-patterns.prompt.md
delete mode 100644 prompts/dataverse-python-production-code.prompt.md
delete mode 100644 prompts/dataverse-python-quickstart.prompt.md
delete mode 100644 prompts/dataverse-python-usecase-builder.prompt.md
delete mode 100644 prompts/debian-linux-triage.prompt.md
delete mode 100644 prompts/declarative-agents.prompt.md
delete mode 100644 prompts/devops-rollout-plan.prompt.md
delete mode 100644 prompts/documentation-writer.prompt.md
delete mode 100644 prompts/dotnet-best-practices.prompt.md
delete mode 100644 prompts/dotnet-design-pattern-review.prompt.md
delete mode 100644 prompts/dotnet-upgrade.prompt.md
delete mode 100644 prompts/editorconfig.prompt.md
delete mode 100644 prompts/ef-core.prompt.md
delete mode 100644 prompts/fedora-linux-triage.prompt.md
delete mode 100644 prompts/finalize-agent-prompt.prompt.md
delete mode 100644 prompts/first-ask.prompt.md
delete mode 100644 prompts/folder-structure-blueprint-generator.prompt.md
delete mode 100644 prompts/gen-specs-as-issues.prompt.md
delete mode 100644 prompts/generate-custom-instructions-from-codebase.prompt.md
delete mode 100644 prompts/git-flow-branch-creator.prompt.md
delete mode 100644 prompts/github-copilot-starter.prompt.md
delete mode 100644 prompts/go-mcp-server-generator.prompt.md
delete mode 100644 prompts/java-add-graalvm-native-image-support.prompt.md
delete mode 100644 prompts/java-docs.prompt.md
delete mode 100644 prompts/java-junit.prompt.md
delete mode 100644 prompts/java-mcp-server-generator.prompt.md
delete mode 100644 prompts/java-refactoring-extract-method.prompt.md
delete mode 100644 prompts/java-refactoring-remove-parameter.prompt.md
delete mode 100644 prompts/java-springboot.prompt.md
delete mode 100644 prompts/javascript-typescript-jest.prompt.md
delete mode 100644 prompts/kotlin-mcp-server-generator.prompt.md
delete mode 100644 prompts/kotlin-springboot.prompt.md
delete mode 100644 prompts/mcp-copilot-studio-server-generator.prompt.md
delete mode 100644 prompts/mcp-create-adaptive-cards.prompt.md
delete mode 100644 prompts/mcp-create-declarative-agent.prompt.md
delete mode 100644 prompts/mcp-deploy-manage-agents.prompt.md
delete mode 100644 prompts/memory-merger.prompt.md
delete mode 100644 prompts/mkdocs-translations.prompt.md
delete mode 100644 prompts/model-recommendation.prompt.md
delete mode 100644 prompts/multi-stage-dockerfile.prompt.md
delete mode 100644 prompts/my-issues.prompt.md
delete mode 100644 prompts/my-pull-requests.prompt.md
delete mode 100644 prompts/next-intl-add-language.prompt.md
delete mode 100644 prompts/openapi-to-application-code.prompt.md
delete mode 100644 prompts/php-mcp-server-generator.prompt.md
delete mode 100644 prompts/playwright-automation-fill-in-form.prompt.md
delete mode 100644 prompts/playwright-explore-website.prompt.md
delete mode 100644 prompts/playwright-generate-test.prompt.md
delete mode 100644 prompts/postgresql-code-review.prompt.md
delete mode 100644 prompts/postgresql-optimization.prompt.md
delete mode 100644 prompts/power-apps-code-app-scaffold.prompt.md
delete mode 100644 prompts/power-bi-dax-optimization.prompt.md
delete mode 100644 prompts/power-bi-model-design-review.prompt.md
delete mode 100644 prompts/power-bi-performance-troubleshooting.prompt.md
delete mode 100644 prompts/power-bi-report-design-consultation.prompt.md
delete mode 100644 prompts/power-platform-mcp-connector-suite.prompt.md
delete mode 100644 prompts/project-workflow-analysis-blueprint-generator.prompt.md
delete mode 100644 prompts/prompt-builder.prompt.md
delete mode 100644 prompts/pytest-coverage.prompt.md
delete mode 100644 prompts/python-mcp-server-generator.prompt.md
delete mode 100644 prompts/readme-blueprint-generator.prompt.md
delete mode 100644 prompts/refactor-method-complexity-reduce.prompt.md
delete mode 100644 prompts/refactor-plan.prompt.md
delete mode 100644 prompts/remember-interactive-programming.prompt.md
delete mode 100644 prompts/remember.prompt.md
delete mode 100644 prompts/repo-story-time.prompt.md
delete mode 100644 prompts/review-and-refactor.prompt.md
delete mode 100644 prompts/ruby-mcp-server-generator.prompt.md
delete mode 100644 prompts/rust-mcp-server-generator.prompt.md
delete mode 100644 prompts/shuffle-json-data.prompt.md
delete mode 100644 prompts/sql-code-review.prompt.md
delete mode 100644 prompts/sql-optimization.prompt.md
delete mode 100644 prompts/structured-autonomy-generate.prompt.md
delete mode 100644 prompts/structured-autonomy-implement.prompt.md
delete mode 100644 prompts/structured-autonomy-plan.prompt.md
delete mode 100644 prompts/suggest-awesome-github-copilot-agents.prompt.md
delete mode 100644 prompts/suggest-awesome-github-copilot-instructions.prompt.md
delete mode 100644 prompts/suggest-awesome-github-copilot-prompts.prompt.md
delete mode 100644 prompts/suggest-awesome-github-copilot-skills.prompt.md
delete mode 100644 prompts/swift-mcp-server-generator.prompt.md
delete mode 100644 prompts/technology-stack-blueprint-generator.prompt.md
delete mode 100644 prompts/tldr-prompt.prompt.md
delete mode 100644 prompts/typescript-mcp-server-generator.prompt.md
delete mode 100644 prompts/typespec-api-operations.prompt.md
delete mode 100644 prompts/typespec-create-agent.prompt.md
delete mode 100644 prompts/typespec-create-api-plugin.prompt.md
delete mode 100644 prompts/update-avm-modules-in-bicep.prompt.md
delete mode 100644 prompts/update-implementation-plan.prompt.md
delete mode 100644 prompts/update-llms.prompt.md
delete mode 100644 prompts/update-markdown-file-index.prompt.md
delete mode 100644 prompts/update-oo-component-documentation.prompt.md
delete mode 100644 prompts/update-specification.prompt.md
delete mode 100644 prompts/what-context-needed.prompt.md
delete mode 100644 prompts/write-coding-standards-from-file.prompt.md
diff --git a/docs/README.plugins.md b/docs/README.plugins.md
index 6f679d2d..ca0bc4bc 100644
--- a/docs/README.plugins.md
+++ b/docs/README.plugins.md
@@ -1,12 +1,12 @@
# 🔌 Plugins
-Curated plugins of related prompts, agents, and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.
+Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.
### How to Use Plugins
**Browse Plugins:**
- ⭐ Featured plugins are highlighted and appear at the top of the list
- Explore themed plugins that group related customizations
-- Each plugin includes prompts, agents, and skills for specific workflows
+- Each plugin includes agents and skills for specific workflows
- Plugins make it easy to adopt comprehensive toolkits for particular scenarios
**Install Plugins:**
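
Plugin installation goes through the GitHub Copilot CLI. As a hedged sketch of what the "Install Plugins" steps look like in practice — the `copilot plugin marketplace add` / `copilot plugin install` subcommand forms below are assumptions about the current CLI, so check `copilot help` for the exact syntax in your version:

```markdown
**Install Plugins:**
- Register this repository as a plugin marketplace, then install plugins by name:
  - `copilot plugin marketplace add github/awesome-copilot`
  - `copilot plugin install <plugin-name>@awesome-copilot`
```
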
diff --git a/docs/README.prompts.md b/docs/README.prompts.md
deleted file mode 100644
index c618c44c..00000000
--- a/docs/README.prompts.md
+++ /dev/null
@@ -1,158 +0,0 @@
-# 🎯 Reusable Prompts
-
-Ready-to-use prompt templates for specific development scenarios and tasks, defining prompt text with a specific mode, model, and available set of tools.
-### How to Use Reusable Prompts
-
-**To Install:**
-- Click the **VS Code** or **VS Code Insiders** install button for the prompt you want to use
-- Download the `*.prompt.md` file and manually add it to your prompt collection
-
-**To Run/Execute:**
-- Use `/prompt-name` in VS Code chat after installation
-- Run the `Chat: Run Prompt` command from the Command Palette
-- Hit the run button while you have a prompt file open in VS Code
-
-| Title | Description |
-| ----- | ----------- |
-| [.NET Upgrade Analysis Prompts](../prompts/dotnet-upgrade.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-upgrade.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-upgrade.prompt.md) | Ready-to-use prompts for comprehensive .NET framework upgrade analysis and execution |
-| [.NET/C# Best Practices](../prompts/dotnet-best-practices.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-best-practices.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-best-practices.prompt.md) | Ensure .NET/C# code meets best practices for the solution/project. |
-| [.NET/C# Design Pattern Review](../prompts/dotnet-design-pattern-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-design-pattern-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdotnet-design-pattern-review.prompt.md) | Review the C#/.NET code for design pattern implementation and suggest improvements. |
-| [Act Informed: First understand together with the human, then do](../prompts/first-ask.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffirst-ask.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffirst-ask.prompt.md) | Interactive, input-tool powered, task refinement workflow: interrogates scope, deliverables, constraints before carrying out the task; Requires the Joyride extension. |
-| [Add Educational Comments](../prompts/add-educational-comments.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fadd-educational-comments.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fadd-educational-comments.prompt.md) | Add educational comments to the file specified, or prompt asking for file to comment if one is not provided. |
-| [Add TypeSpec API Operations](../prompts/typespec-api-operations.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-api-operations.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-api-operations.prompt.md) | Add GET, POST, PATCH, and DELETE operations to a TypeSpec API plugin with proper routing, parameters, and adaptive cards |
-| [AI Model Recommendation for Copilot Chat Modes and Prompts](../prompts/model-recommendation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmodel-recommendation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmodel-recommendation.prompt.md) | Analyze chatmode or prompt files and recommend optimal AI models based on task complexity, required capabilities, and cost-efficiency |
-| [AI Prompt Engineering Safety Review & Improvement](../prompts/ai-prompt-engineering-safety-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fai-prompt-engineering-safety-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fai-prompt-engineering-safety-review.prompt.md) | Comprehensive AI prompt engineering safety review and improvement prompt. Analyzes prompts for safety, bias, security vulnerabilities, and effectiveness while providing detailed improvement recommendations with extensive frameworks, testing methodologies, and educational content. |
-| [Apple App Store Reviewer](../prompts/apple-appstore-reviewer.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fapple-appstore-reviewer.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fapple-appstore-reviewer.prompt.md) | Serves as a reviewer of the codebase with instructions on looking for Apple App Store optimizations or rejection reasons. |
-| [Arch Linux Triage](../prompts/arch-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Farch-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Farch-linux-triage.prompt.md) | Triage and resolve Arch Linux issues with pacman, systemd, and rolling-release best practices. |
-| [ASP.NET .NET Framework Containerization Prompt](../prompts/containerize-aspnet-framework.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontainerize-aspnet-framework.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontainerize-aspnet-framework.prompt.md) | Containerize an ASP.NET .NET Framework project by creating Dockerfile and .dockerfile files customized for the project. |
-| [ASP.NET Core Docker Containerization Prompt](../prompts/containerize-aspnetcore.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontainerize-aspnetcore.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontainerize-aspnetcore.prompt.md) | Containerize an ASP.NET Core project by creating Dockerfile and .dockerfile files customized for the project. |
-| [ASP.NET Minimal API with OpenAPI](../prompts/aspnet-minimal-api-openapi.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faspnet-minimal-api-openapi.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faspnet-minimal-api-openapi.prompt.md) | Create ASP.NET Minimal API endpoints with proper OpenAPI documentation |
-| [Automating Filling in a Form with Playwright MCP](../prompts/playwright-automation-fill-in-form.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-automation-fill-in-form.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-automation-fill-in-form.prompt.md) | Automate filling in a form using Playwright MCP |
-| [Azure Cosmos DB NoSQL Data Modeling Expert System Prompt](../prompts/cosmosdb-datamodeling.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcosmosdb-datamodeling.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcosmosdb-datamodeling.prompt.md) | Step-by-step guide for capturing key application requirements for NoSQL use-case and produce Azure Cosmos DB Data NoSQL Model design using best practices and common patterns, artifacts_produced: "cosmosdb_requirements.md" file and "cosmosdb_data_model.md" file |
-| [Azure Cost Optimize](../prompts/az-cost-optimize.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faz-cost-optimize.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faz-cost-optimize.prompt.md) | Analyze Azure resources used in the app (IaC files and/or resources in a target rg) and optimize costs - creating GitHub issues for identified optimizations. |
-| [Azure Resource Health & Issue Diagnosis](../prompts/azure-resource-health-diagnose.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fazure-resource-health-diagnose.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fazure-resource-health-diagnose.prompt.md) | Analyze Azure resource health, diagnose issues from logs and telemetry, and create a remediation plan for identified problems. |
-| [Boost Prompt](../prompts/boost-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fboost-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fboost-prompt.prompt.md) | Interactive prompt refinement workflow: interrogates scope, deliverables, constraints; copies final markdown to clipboard; never writes code. Requires the Joyride extension. |
-| [C# Async Programming Best Practices](../prompts/csharp-async.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-async.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-async.prompt.md) | Get best practices for C# async programming |
-| [C# Documentation Best Practices](../prompts/csharp-docs.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-docs.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-docs.prompt.md) | Ensure that C# types are documented with XML comments and follow best practices for documentation. |
-| [CentOS Linux Triage](../prompts/centos-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcentos-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcentos-linux-triage.prompt.md) | Triage and resolve CentOS issues using RHEL-compatible tooling, SELinux-aware practices, and firewalld. |
-| [Code Exemplars Blueprint Generator](../prompts/code-exemplars-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcode-exemplars-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcode-exemplars-blueprint-generator.prompt.md) | Technology-agnostic prompt generator that creates customizable AI prompts for scanning codebases and identifying high-quality code exemplars. Supports multiple programming languages (.NET, Java, JavaScript, TypeScript, React, Angular, Python) with configurable analysis depth, categorization methods, and documentation formats to establish coding standards and maintain consistency across development teams. |
-| [Comment Code Generate A Tutorial](../prompts/comment-code-generate-a-tutorial.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcomment-code-generate-a-tutorial.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcomment-code-generate-a-tutorial.prompt.md) | Transform this Python script into a polished, beginner-friendly project by refactoring the code, adding clear instructional comments, and generating a complete markdown tutorial. |
-| [Comprehensive Project Architecture Blueprint Generator](../prompts/architecture-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Farchitecture-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Farchitecture-blueprint-generator.prompt.md) | Comprehensive project architecture blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks and architectural patterns, generates visual diagrams, documents implementation patterns, and provides extensible blueprints for maintaining architectural consistency and guiding new development. |
-| [Comprehensive Technology Stack Blueprint Generator](../prompts/technology-stack-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftechnology-stack-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftechnology-stack-blueprint-generator.prompt.md) | Comprehensive technology stack blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks, programming languages, and implementation patterns across multiple platforms (.NET, Java, JavaScript, React, Python). Generates configurable blueprints with version information, licensing details, usage patterns, coding conventions, and visual diagrams. Provides implementation-ready templates and maintains architectural consistency for guided development. |
-| [Context Map](../prompts/context-map.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontext-map.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcontext-map.prompt.md) | Generate a map of all files relevant to a task before making changes |
-| [Conventional Commit](../prompts/conventional-commit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fconventional-commit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fconventional-commit.prompt.md) | Prompt and workflow for generating conventional commit messages using a structured XML format. Guides users to create standardized, descriptive commit messages in line with the Conventional Commits specification, including instructions, examples, and validation. |
-| [Convert Plaintext Documentation to Markdown](../prompts/convert-plaintext-to-md.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fconvert-plaintext-to-md.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fconvert-plaintext-to-md.prompt.md) | Convert a text-based document to markdown following instructions from prompt, or if a documented option is passed, follow the instructions for that option. |
-| [Copilot Instructions Blueprint Generator](../prompts/copilot-instructions-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcopilot-instructions-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcopilot-instructions-blueprint-generator.prompt.md) | Technology-agnostic blueprint generator for creating comprehensive copilot-instructions.md files that guide GitHub Copilot to produce code consistent with project standards, architecture patterns, and exact technology versions by analyzing existing codebase patterns and avoiding assumptions. |
-| [Create Architectural Decision Record](../prompts/create-architectural-decision-record.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-architectural-decision-record.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-architectural-decision-record.prompt.md) | Create an Architectural Decision Record (ADR) document for AI-optimized decision documentation. |
-| [Create GitHub Actions Workflow Specification](../prompts/create-github-action-workflow-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-action-workflow-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-action-workflow-specification.prompt.md) | Create a formal specification for an existing GitHub Actions CI/CD workflow, optimized for AI consumption and workflow maintenance. |
-| [Create GitHub Issue from Implementation Plan](../prompts/create-github-issues-feature-from-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issues-feature-from-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issues-feature-from-implementation-plan.prompt.md) | Create GitHub Issues from implementation plan phases using feature_request.yml or chore_request.yml templates. |
-| [Create GitHub Issue from Specification](../prompts/create-github-issue-feature-from-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issue-feature-from-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issue-feature-from-specification.prompt.md) | Create GitHub Issue for feature request from specification file using feature_request.yml template. |
-| [Create GitHub Issues for Unmet Specification Requirements](../prompts/create-github-issues-for-unmet-specification-requirements.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issues-for-unmet-specification-requirements.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-issues-for-unmet-specification-requirements.prompt.md) | Create GitHub Issues for unimplemented requirements from specification files using feature_request.yml template. |
-| [Create GitHub Pull Request from Specification](../prompts/create-github-pull-request-from-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-pull-request-from-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-github-pull-request-from-specification.prompt.md) | Create GitHub Pull Request for feature request from specification file using pull_request_template.md template. |
-| [Create high-quality AGENTS.md file](../prompts/create-agentsmd.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-agentsmd.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-agentsmd.prompt.md) | Prompt for generating an AGENTS.md file for a repository |
-| [Create Implementation Plan](../prompts/create-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-implementation-plan.prompt.md) | Create a new implementation plan file for new features, refactoring existing code or upgrading packages, design, architecture or infrastructure. |
-| [Create LLMs.txt File from Repository Structure](../prompts/create-llms.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-llms.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-llms.prompt.md) | Create an llms.txt file from scratch based on repository structure following the llms.txt specification at https://llmstxt.org/ |
-| [Create Readme](../prompts/create-readme.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-readme.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-readme.prompt.md) | Create a README.md file for the project |
-| [Create Specification](../prompts/create-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-specification.prompt.md) | Create a new specification file for the solution, optimized for Generative AI consumption. |
-| [Create Spring Boot Java project prompt](../prompts/create-spring-boot-java-project.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-spring-boot-java-project.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-spring-boot-java-project.prompt.md) | Create Spring Boot Java Project Skeleton |
-| [Create Spring Boot Kotlin project prompt](../prompts/create-spring-boot-kotlin-project.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-spring-boot-kotlin-project.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-spring-boot-kotlin-project.prompt.md) | Create Spring Boot Kotlin Project Skeleton |
-| [Create Technical Spike Document](../prompts/create-technical-spike.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-technical-spike.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-technical-spike.prompt.md) | Create time-boxed technical spike documents for researching and resolving critical development decisions before implementation. |
-| [Create TLDR Page](../prompts/create-tldr-page.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-tldr-page.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-tldr-page.prompt.md) | Create a tldr page from documentation URLs and command examples, requiring both URL and command name. |
-| [Create TypeSpec API Plugin](../prompts/typespec-create-api-plugin.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-create-api-plugin.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-create-api-plugin.prompt.md) | Generate a TypeSpec API plugin with REST operations, authentication, and Adaptive Cards for Microsoft 365 Copilot |
-| [Create TypeSpec Declarative Agent](../prompts/typespec-create-agent.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-create-agent.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypespec-create-agent.prompt.md) | Generate a complete TypeSpec declarative agent with instructions, capabilities, and conversation starters for Microsoft 365 Copilot |
-| [Dataverse Python Production Code Generator](../prompts/dataverse-python-production-code.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-production-code.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-production-code.prompt.md) | Generate production-ready Python code using Dataverse SDK with error handling, optimization, and best practices |
-| [Dataverse Python Use Case Solution Builder](../prompts/dataverse-python-usecase-builder.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-usecase-builder.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-usecase-builder.prompt.md) | Generate complete solutions for specific Dataverse SDK use cases with architecture recommendations |
-| [Dataverse Python Advanced Patterns](../prompts/dataverse-python-advanced-patterns.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-advanced-patterns.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-advanced-patterns.prompt.md) | Generate production code for Dataverse SDK using advanced patterns, error handling, and optimization techniques. |
-| [Dataverse Python Quickstart Generator](../prompts/dataverse-python-quickstart.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-quickstart.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdataverse-python-quickstart.prompt.md) | Generate Python SDK setup + CRUD + bulk + paging snippets using official patterns. |
-| [Debian Linux Triage](../prompts/debian-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdebian-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdebian-linux-triage.prompt.md) | Triage and resolve Debian Linux issues with apt, systemd, and AppArmor-aware guidance. |
-| [DevOps Rollout Plan Generator](../prompts/devops-rollout-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdevops-rollout-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdevops-rollout-plan.prompt.md) | Generate comprehensive rollout plans with preflight checks, step-by-step deployment, verification signals, rollback procedures, and communication plans for infrastructure and application changes |
-| [Diátaxis Documentation Expert](../prompts/documentation-writer.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdocumentation-writer.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdocumentation-writer.prompt.md) | Diátaxis Documentation Expert. An expert technical writer specializing in creating high-quality software documentation, guided by the principles and structure of the Diátaxis technical documentation authoring framework. |
-| [EditorConfig Expert](../prompts/editorconfig.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Feditorconfig.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Feditorconfig.prompt.md) | Generates a comprehensive and best-practice-oriented .editorconfig file based on project analysis and user preferences. |
-| [Entity Framework Core Best Practices](../prompts/ef-core.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fef-core.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fef-core.prompt.md) | Get best practices for Entity Framework Core |
-| [Epic Architecture Specification Prompt](../prompts/breakdown-epic-arch.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-epic-arch.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-epic-arch.prompt.md) | Prompt for creating the high-level technical architecture for an Epic, based on a Product Requirements Document. |
-| [Epic Product Requirements Document (PRD) Prompt](../prompts/breakdown-epic-pm.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-epic-pm.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-epic-pm.prompt.md) | Prompt for creating an Epic Product Requirements Document (PRD) for a new epic. This PRD will be used as input for generating a technical architecture specification. |
-| [Feature Implementation Plan Prompt](../prompts/breakdown-feature-implementation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-feature-implementation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-feature-implementation.prompt.md) | Prompt for creating detailed feature implementation plans, following Epoch monorepo structure. |
-| [Feature PRD Prompt](../prompts/breakdown-feature-prd.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-feature-prd.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-feature-prd.prompt.md) | Prompt for creating Product Requirements Documents (PRDs) for new features, based on an Epic. |
-| [Fedora Linux Triage](../prompts/fedora-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffedora-linux-triage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffedora-linux-triage.prompt.md) | Triage and resolve Fedora issues with dnf, systemd, and SELinux-aware guidance. |
-| [Finalize Agent Prompt](../prompts/finalize-agent-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffinalize-agent-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffinalize-agent-prompt.prompt.md) | Finalize prompt file using the role of an AI agent to polish the prompt for the end user. |
-| [Generate Application from OpenAPI Spec](../prompts/openapi-to-application-code.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fopenapi-to-application-code.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fopenapi-to-application-code.prompt.md) | Generate a complete, production-ready application from an OpenAPI specification |
-| [Generate C# MCP Server](../prompts/csharp-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-mcp-server-generator.prompt.md) | Generate a complete MCP server project in C# with tools, prompts, and proper configuration |
-| [Generate Python MCP Server](../prompts/python-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpython-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpython-mcp-server-generator.prompt.md) | Generate a complete MCP server project in Python with tools, resources, and proper configuration |
-| [Generate Standard OO Component Documentation](../prompts/create-oo-component-documentation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-oo-component-documentation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcreate-oo-component-documentation.prompt.md) | Create comprehensive, standardized documentation for object-oriented components following industry best practices and architectural documentation standards. |
-| [Generate TypeScript MCP Server](../prompts/typescript-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypescript-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftypescript-mcp-server-generator.prompt.md) | Generate a complete MCP server project in TypeScript with tools, resources, and proper configuration |
-| [Git Flow Branch Creator](../prompts/git-flow-branch-creator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgit-flow-branch-creator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgit-flow-branch-creator.prompt.md) | Intelligent Git Flow branch creator that analyzes git status/diff and creates appropriate branches following the nvie Git Flow branching model. |
-| [GitHub Copilot Starter](../prompts/github-copilot-starter.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgithub-copilot-starter.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgithub-copilot-starter.prompt.md) | Set up a complete GitHub Copilot configuration for a new project based on its technology stack |
-| [GitHub Issue Planning & Project Automation Prompt](../prompts/breakdown-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-plan.prompt.md) | Issue Planning and Automation prompt that generates comprehensive project plans with Epic > Feature > Story/Enabler > Test hierarchy, dependencies, priorities, and automated tracking. |
-| [Go MCP Server Project Generator](../prompts/go-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgo-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgo-mcp-server-generator.prompt.md) | Generate a complete Go MCP server project with proper structure, dependencies, and implementation using the official github.com/modelcontextprotocol/go-sdk. |
-| [GraalVM Native Image Agent](../prompts/java-add-graalvm-native-image-support.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-add-graalvm-native-image-support.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-add-graalvm-native-image-support.prompt.md) | GraalVM Native Image expert that adds native image support to Java applications, builds the project, analyzes build errors, applies fixes, and iterates until successful compilation using Oracle best practices. |
-| [Interactive Programming Nudge](../prompts/remember-interactive-programming.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fremember-interactive-programming.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fremember-interactive-programming.prompt.md) | A micro-prompt that reminds the agent that it is an interactive programmer. Works great in Clojure when Copilot has access to the REPL (probably via Backseat Driver). Will work with any system that has a live REPL that the agent can use. Adapt the prompt with any specific reminders in your workflow and/or workspace. |
-| [Java Documentation (Javadoc) Best Practices](../prompts/java-docs.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-docs.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-docs.prompt.md) | Ensure that Java types are documented with Javadoc comments and follow best practices for documentation. |
-| [Java MCP Server Generator](../prompts/java-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-mcp-server-generator.prompt.md) | Generate a complete Model Context Protocol server project in Java using the official MCP Java SDK with reactive streams and optional Spring Boot integration. |
-| [JavaScript TypeScript Jest](../prompts/javascript-typescript-jest.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjavascript-typescript-jest.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjavascript-typescript-jest.prompt.md) | Best practices for writing JavaScript/TypeScript tests using Jest, including mocking strategies, test structure, and common patterns. |
-| [JUnit 5+ Best Practices](../prompts/java-junit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-junit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-junit.prompt.md) | Get best practices for JUnit 5 unit testing, including data-driven tests |
-| [Kotlin MCP Server Project Generator](../prompts/kotlin-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fkotlin-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fkotlin-mcp-server-generator.prompt.md) | Generate a complete Kotlin MCP server project with proper structure, dependencies, and implementation using the official io.modelcontextprotocol:kotlin-sdk library. |
-| [MCP Create Adaptive Cards](../prompts/mcp-create-adaptive-cards.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-create-adaptive-cards.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-create-adaptive-cards.prompt.md) |  |
-| [MCP Create Declarative Agent](../prompts/mcp-create-declarative-agent.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-create-declarative-agent.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-create-declarative-agent.prompt.md) |  |
-| [MCP Deploy Manage Agents](../prompts/mcp-deploy-manage-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-deploy-manage-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-deploy-manage-agents.prompt.md) |  |
-| [Memory Keeper](../prompts/remember.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fremember.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fremember.prompt.md) | Transforms lessons learned into domain-organized memory instructions (global or workspace). Syntax: `/remember [>domain [scope]] lesson clue` where scope is `global` (default), `user`, `workspace`, or `ws`. |
-| [Memory Merger](../prompts/memory-merger.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmemory-merger.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmemory-merger.prompt.md) | Merges mature lessons from a domain memory file into its instruction file. Syntax: `/memory-merger >domain [scope]` where scope is `global` (default), `user`, `workspace`, or `ws`. |
-| [Microsoft 365 Declarative Agents Development Kit](../prompts/declarative-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdeclarative-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fdeclarative-agents.prompt.md) | Complete development kit for Microsoft 365 Copilot declarative agents with three comprehensive workflows (basic, advanced, validation), TypeSpec support, and Microsoft 365 Agents Toolkit integration |
-| [Migration and Code Evolution Instructions Generator](../prompts/generate-custom-instructions-from-codebase.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgenerate-custom-instructions-from-codebase.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgenerate-custom-instructions-from-codebase.prompt.md) | Migration and code evolution instructions generator for GitHub Copilot. Analyzes differences between two project versions (branches, commits, or releases) to create precise instructions allowing Copilot to maintain consistency during technology migrations, major refactoring, or framework version upgrades. |
-| [MkDocs AI Translator](../prompts/mkdocs-translations.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmkdocs-translations.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmkdocs-translations.prompt.md) | Generate a language translation for a mkdocs documentation stack. |
-| [MSTest Best Practices (MSTest 3.x/4.x)](../prompts/csharp-mstest.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-mstest.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-mstest.prompt.md) | Get best practices for MSTest 3.x/4.x unit testing, including modern assertion APIs and data-driven tests |
-| [Multi Stage Dockerfile](../prompts/multi-stage-dockerfile.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmulti-stage-dockerfile.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmulti-stage-dockerfile.prompt.md) | Create optimized multi-stage Dockerfiles for any language or framework |
-| [My Issues](../prompts/my-issues.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmy-issues.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmy-issues.prompt.md) | List my issues in the current repository |
-| [My Pull Requests](../prompts/my-pull-requests.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmy-pull-requests.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmy-pull-requests.prompt.md) | List my pull requests in the current repository |
-| [Next Intl Add Language](../prompts/next-intl-add-language.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fnext-intl-add-language.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fnext-intl-add-language.prompt.md) | Add new language to a Next.js + next-intl application |
-| [NUnit Best Practices](../prompts/csharp-nunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-nunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-nunit.prompt.md) | Get best practices for NUnit unit testing, including data-driven tests |
-| [PHP MCP Server Generator](../prompts/php-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fphp-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fphp-mcp-server-generator.prompt.md) | Generate a complete PHP Model Context Protocol server project with tools, resources, prompts, and tests using the official PHP SDK |
-| [PostgreSQL Code Review Assistant](../prompts/postgresql-code-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpostgresql-code-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpostgresql-code-review.prompt.md) | PostgreSQL-specific code review assistant focusing on PostgreSQL best practices, anti-patterns, and unique quality standards. Covers JSONB operations, array usage, custom types, schema design, function optimization, and PostgreSQL-exclusive security features like Row Level Security (RLS). |
-| [PostgreSQL Development Assistant](../prompts/postgresql-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpostgresql-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpostgresql-optimization.prompt.md) | PostgreSQL-specific development assistant focusing on unique PostgreSQL features, advanced data types, and PostgreSQL-exclusive capabilities. Covers JSONB operations, array types, custom types, range/geometric types, full-text search, window functions, and PostgreSQL extensions ecosystem. |
-| [Power Apps Code Apps Project Scaffolding](../prompts/power-apps-code-app-scaffold.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-apps-code-app-scaffold.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-apps-code-app-scaffold.prompt.md) | Scaffold a complete Power Apps Code App project with PAC CLI setup, SDK integration, and connector configuration |
-| [Power BI Data Model Design Review](../prompts/power-bi-model-design-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-model-design-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-model-design-review.prompt.md) | Comprehensive Power BI data model design review prompt for evaluating model architecture, relationships, and optimization opportunities. |
-| [Power BI DAX Formula Optimizer](../prompts/power-bi-dax-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-dax-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-dax-optimization.prompt.md) | Comprehensive Power BI DAX formula optimization prompt for improving performance, readability, and maintainability of DAX calculations. |
-| [Power BI Performance Troubleshooting Guide](../prompts/power-bi-performance-troubleshooting.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-performance-troubleshooting.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-performance-troubleshooting.prompt.md) | Systematic Power BI performance troubleshooting prompt for identifying, diagnosing, and resolving performance issues in Power BI models, reports, and queries. |
-| [Power BI Report Visualization Designer](../prompts/power-bi-report-design-consultation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-report-design-consultation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-bi-report-design-consultation.prompt.md) | Power BI report visualization design prompt for creating effective, user-friendly, and accessible reports with optimal chart selection and layout design. |
-| [Power Platform MCP Connector Generator](../prompts/mcp-copilot-studio-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-copilot-studio-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fmcp-copilot-studio-server-generator.prompt.md) | Generate a complete MCP server implementation optimized for Copilot Studio integration with proper schema constraints and streamable HTTP support |
-| [Power Platform MCP Connector Suite](../prompts/power-platform-mcp-connector-suite.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-platform-mcp-connector-suite.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpower-platform-mcp-connector-suite.prompt.md) | Generate complete Power Platform custom connector with MCP integration for Copilot Studio - includes schema generation, troubleshooting, and validation |
-| [Product Manager Assistant: Feature Identification and Specification](../prompts/gen-specs-as-issues.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgen-specs-as-issues.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fgen-specs-as-issues.prompt.md) | This workflow guides you through a systematic approach to identify missing features, prioritize them, and create detailed specifications for implementation. |
-| [Professional Prompt Builder](../prompts/prompt-builder.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fprompt-builder.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fprompt-builder.prompt.md) | Guide users through creating high-quality GitHub Copilot prompts with proper structure, tools, and best practices. |
-| [Project Folder Structure Blueprint Generator](../prompts/folder-structure-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffolder-structure-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ffolder-structure-blueprint-generator.prompt.md) | Comprehensive technology-agnostic prompt for analyzing and documenting project folder structures. Auto-detects project types (.NET, Java, React, Angular, Python, Node.js, Flutter), generates detailed blueprints with visualization options, naming conventions, file placement patterns, and extension templates for maintaining consistent code organization across diverse technology stacks. |
-| [Project Workflow Documentation Generator](../prompts/project-workflow-analysis-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fproject-workflow-analysis-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fproject-workflow-analysis-blueprint-generator.prompt.md) | Comprehensive technology-agnostic prompt generator for documenting end-to-end application workflows. Automatically detects project architecture patterns, technology stacks, and data flow patterns to generate detailed implementation blueprints covering entry points, service layers, data access, error handling, and testing approaches across multiple technologies including .NET, Java/Spring, React, and microservices architectures. |
-| [Pytest Coverage](../prompts/pytest-coverage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpytest-coverage.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fpytest-coverage.prompt.md) | Run pytest tests with coverage, discover lines missing coverage, and increase coverage to 100%. |
-| [README Generator Prompt](../prompts/readme-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Freadme-blueprint-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Freadme-blueprint-generator.prompt.md) | Intelligent README.md generation prompt that analyzes project documentation structure and creates comprehensive repository documentation. Scans .github/copilot directory files and copilot-instructions.md to extract project information, technology stack, architecture, development workflow, coding standards, and testing approaches while generating well-structured markdown documentation with proper formatting, cross-references, and developer-focused content. |
-| [Refactor Method Complexity Reduce](../prompts/refactor-method-complexity-reduce.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frefactor-method-complexity-reduce.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frefactor-method-complexity-reduce.prompt.md) | Refactor given method `${input:methodName}` to reduce its cognitive complexity to `${input:complexityThreshold}` or below, by extracting helper methods. |
-| [Refactor Plan](../prompts/refactor-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frefactor-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frefactor-plan.prompt.md) | Plan a multi-file refactor with proper sequencing and rollback steps |
-| [Refactoring Java Methods with Extract Method](../prompts/java-refactoring-extract-method.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-refactoring-extract-method.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-refactoring-extract-method.prompt.md) | Refactoring using Extract Methods in Java Language |
-| [Refactoring Java Methods with Remove Parameter](../prompts/java-refactoring-remove-parameter.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-refactoring-remove-parameter.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-refactoring-remove-parameter.prompt.md) | Refactoring using Remove Parameter in Java Language |
-| [Repo Story Time](../prompts/repo-story-time.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frepo-story-time.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frepo-story-time.prompt.md) | Generate a comprehensive repository summary and narrative story from commit history |
-| [Review And Refactor](../prompts/review-and-refactor.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Freview-and-refactor.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Freview-and-refactor.prompt.md) | Review and refactor code in your project according to defined instructions |
-| [Ruby MCP Server Generator](../prompts/ruby-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fruby-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fruby-mcp-server-generator.prompt.md) | Generate a complete Model Context Protocol server project in Ruby using the official MCP Ruby SDK gem. |
-| [Rust MCP Server Generator](../prompts/rust-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frust-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Frust-mcp-server-generator.prompt.md) | Generate a complete Rust Model Context Protocol server project with tools, prompts, resources, and tests using the official rmcp SDK |
-| [Sa Generate](../prompts/structured-autonomy-generate.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-generate.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-generate.prompt.md) | Structured Autonomy Implementation Generator Prompt |
-| [Sa Implement](../prompts/structured-autonomy-implement.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-implement.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-implement.prompt.md) | Structured Autonomy Implementation Prompt |
-| [Sa Plan](../prompts/structured-autonomy-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fstructured-autonomy-plan.prompt.md) | Structured Autonomy Planning Prompt |
-| [Shuffle JSON Data](../prompts/shuffle-json-data.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fshuffle-json-data.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fshuffle-json-data.prompt.md) | Shuffle repetitive JSON objects safely by validating schema consistency before randomising entries. |
-| [Spring Boot Best Practices](../prompts/java-springboot.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-springboot.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fjava-springboot.prompt.md) | Get best practices for developing applications with Spring Boot. |
-| [Spring Boot with Kotlin Best Practices](../prompts/kotlin-springboot.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fkotlin-springboot.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fkotlin-springboot.prompt.md) | Get best practices for developing applications with Spring Boot and Kotlin. |
-| [SQL Code Review](../prompts/sql-code-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsql-code-review.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsql-code-review.prompt.md) | Universal SQL code review assistant that performs comprehensive security, maintainability, and code quality analysis across all SQL databases (MySQL, PostgreSQL, SQL Server, Oracle). Focuses on SQL injection prevention, access control, code standards, and anti-pattern detection. Complements SQL optimization prompt for complete development coverage. |
-| [SQL Performance Optimization Assistant](../prompts/sql-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsql-optimization.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsql-optimization.prompt.md) | Universal SQL performance optimization assistant for comprehensive query tuning, indexing strategies, and database performance analysis across all SQL databases (MySQL, PostgreSQL, SQL Server, Oracle). Provides execution plan analysis, pagination optimization, batch operations, and performance monitoring guidance. |
-| [Suggest Awesome GitHub Copilot Custom Agents](../prompts/suggest-awesome-github-copilot-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-agents.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-agents.prompt.md) | Suggest relevant GitHub Copilot Custom Agents files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing custom agents in this repository, and identifying outdated agents that need updates. |
-| [Suggest Awesome GitHub Copilot Instructions](../prompts/suggest-awesome-github-copilot-instructions.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-instructions.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-instructions.prompt.md) | Suggest relevant GitHub Copilot instruction files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing instructions in this repository, and identifying outdated instructions that need updates. |
-| [Suggest Awesome GitHub Copilot Prompts](../prompts/suggest-awesome-github-copilot-prompts.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-prompts.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-prompts.prompt.md) | Suggest relevant GitHub Copilot prompt files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing prompts in this repository, and identifying outdated prompts that need updates. |
-| [Suggest Awesome GitHub Copilot Skills](../prompts/suggest-awesome-github-copilot-skills.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-skills.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fsuggest-awesome-github-copilot-skills.prompt.md) | Suggest relevant GitHub Copilot skills from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing skills in this repository, and identifying outdated skills that need updates. |
-| [Swift MCP Server Generator](../prompts/swift-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fswift-mcp-server-generator.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fswift-mcp-server-generator.prompt.md) | Generate a complete Model Context Protocol server project in Swift using the official MCP Swift SDK package. |
-| [Test Generation with Playwright MCP](../prompts/playwright-generate-test.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-generate-test.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-generate-test.prompt.md) | Generate a Playwright test based on a scenario using Playwright MCP |
-| [Test Planning & Quality Assurance Prompt](../prompts/breakdown-test.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-test.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbreakdown-test.prompt.md) | Test Planning and Quality Assurance prompt that generates comprehensive test strategies, task breakdowns, and quality validation plans for GitHub projects. |
-| [TLDR Prompt](../prompts/tldr-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftldr-prompt.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Ftldr-prompt.prompt.md) | Create tldr summaries for GitHub Copilot files (prompts, agents, instructions, collections), MCP servers, or documentation from URLs and queries. |
-| [TUnit Best Practices](../prompts/csharp-tunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-tunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-tunit.prompt.md) | Get best practices for TUnit unit testing, including data-driven tests |
-| [Update Azure Verified Modules in Bicep Files](../prompts/update-avm-modules-in-bicep.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-avm-modules-in-bicep.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-avm-modules-in-bicep.prompt.md) | Update Azure Verified Modules (AVM) to latest versions in Bicep files. |
-| [Update Implementation Plan](../prompts/update-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-implementation-plan.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-implementation-plan.prompt.md) | Update an existing implementation plan file with new or updated requirements to provide new features, refactor existing code, upgrade packages, or revise the design, architecture, or infrastructure. |
-| [Update LLMs.txt File](../prompts/update-llms.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-llms.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-llms.prompt.md) | Update the llms.txt file in the root folder to reflect changes in documentation or specifications, following the llms.txt specification at https://llmstxt.org/ |
-| [Update Markdown File Index](../prompts/update-markdown-file-index.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-markdown-file-index.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-markdown-file-index.prompt.md) | Update a markdown file section with an index/table of files from a specified folder. |
-| [Update Specification](../prompts/update-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-specification.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-specification.prompt.md) | Update an existing specification file for the solution, optimized for Generative AI consumption based on new requirements or updates to any existing code. |
-| [Update Standard OO Component Documentation](../prompts/update-oo-component-documentation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-oo-component-documentation.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fupdate-oo-component-documentation.prompt.md) | Update existing object-oriented component documentation following industry best practices and architectural documentation standards. |
-| [Website Exploration for Testing](../prompts/playwright-explore-website.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-explore-website.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fplaywright-explore-website.prompt.md) | Website exploration for testing using Playwright MCP |
-| [What Context Do You Need?](../prompts/what-context-needed.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fwhat-context-needed.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fwhat-context-needed.prompt.md) | Ask Copilot what files it needs to see before answering a question |
-| [Write Coding Standards From File](../prompts/write-coding-standards-from-file.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fwrite-coding-standards-from-file.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fwrite-coding-standards-from-file.prompt.md) | Write a coding standards document for a project using the coding styles from the file(s) and/or folder(s) passed as arguments in the prompt. |
-| [XUnit Best Practices](../prompts/csharp-xunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-xunit.prompt.md) [](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-xunit.prompt.md) | Get best practices for XUnit unit testing, including data-driven tests |
diff --git a/docs/README.skills.md b/docs/README.skills.md
index f047c84f..70bb47ec 100644
--- a/docs/README.skills.md
+++ b/docs/README.skills.md
@@ -22,54 +22,196 @@ Skills differ from other primitives by supporting bundled assets (scripts, code
| Name | Description | Bundled Assets |
| ---- | ----------- | -------------- |
+| [add-educational-comments](../skills/add-educational-comments/SKILL.md) | Add educational comments to the specified file, or prompt for a file to comment if one is not provided. | None |
| [agent-governance](../skills/agent-governance/SKILL.md) | Patterns and techniques for adding governance, safety, and trust controls to AI agent systems. Use this skill when building AI agents that call external tools (APIs, databases, file systems); implementing policy-based access controls for agent tool usage; adding semantic intent classification to detect dangerous prompts; creating trust scoring systems for multi-agent workflows; building audit trails for agent actions and decisions; enforcing rate limits, content filters, or tool restrictions on agents; or working with any agent framework (PydanticAI, CrewAI, OpenAI Agents, LangChain, AutoGen). | None |
| [agentic-eval](../skills/agentic-eval/SKILL.md) | Patterns and techniques for evaluating and improving AI agent outputs. Use this skill when implementing self-critique and reflection loops; building evaluator-optimizer pipelines for quality-critical generation; creating test-driven code refinement workflows; designing rubric-based or LLM-as-judge evaluation systems; adding iterative improvement to agent outputs (code, reports, analysis); or measuring and improving agent response quality. | None |
+| [ai-prompt-engineering-safety-review](../skills/ai-prompt-engineering-safety-review/SKILL.md) | Comprehensive AI prompt engineering safety review and improvement prompt. Analyzes prompts for safety, bias, security vulnerabilities, and effectiveness while providing detailed improvement recommendations with extensive frameworks, testing methodologies, and educational content. | None |
| [appinsights-instrumentation](../skills/appinsights-instrumentation/SKILL.md) | Instrument a webapp to send useful telemetry data to Azure App Insights | `LICENSE.txt` `examples/appinsights.bicep` `references/ASPNETCORE.md` `references/AUTO.md` `references/NODEJS.md` `references/PYTHON.md` `scripts/appinsights.ps1` |
+| [apple-appstore-reviewer](../skills/apple-appstore-reviewer/SKILL.md) | Serves as a reviewer of the codebase with instructions on looking for Apple App Store optimizations or rejection reasons. | None |
+| [arch-linux-triage](../skills/arch-linux-triage/SKILL.md) | Triage and resolve Arch Linux issues with pacman, systemd, and rolling-release best practices. | None |
+| [architecture-blueprint-generator](../skills/architecture-blueprint-generator/SKILL.md) | Comprehensive project architecture blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks and architectural patterns, generates visual diagrams, documents implementation patterns, and provides extensible blueprints for maintaining architectural consistency and guiding new development. | None |
| [aspire](../skills/aspire/SKILL.md) | Aspire skill covering the Aspire CLI, AppHost orchestration, service discovery, integrations, MCP server, VS Code extension, Dev Containers, GitHub Codespaces, templates, dashboard, and deployment. Use when the user asks to create, run, debug, configure, deploy, or troubleshoot an Aspire distributed application. | `references/architecture.md` `references/cli-reference.md` `references/dashboard.md` `references/deployment.md` `references/integrations-catalog.md` `references/mcp-server.md` `references/polyglot-apis.md` `references/testing.md` `references/troubleshooting.md` |
+| [aspnet-minimal-api-openapi](../skills/aspnet-minimal-api-openapi/SKILL.md) | Create ASP.NET Minimal API endpoints with proper OpenAPI documentation | None |
+| [az-cost-optimize](../skills/az-cost-optimize/SKILL.md) | Analyze Azure resources used in the app (IaC files and/or resources in a target resource group) and optimize costs, creating GitHub issues for identified optimizations. | None |
| [azure-deployment-preflight](../skills/azure-deployment-preflight/SKILL.md) | Performs comprehensive preflight validation of Bicep deployments to Azure, including template syntax validation, what-if analysis, and permission checks. Use this skill before any deployment to Azure to preview changes, identify potential issues, and ensure the deployment will succeed. Activate when users mention deploying to Azure, validating Bicep files, checking deployment permissions, previewing infrastructure changes, running what-if, or preparing for azd provision. | `references/ERROR-HANDLING.md` `references/REPORT-TEMPLATE.md` `references/VALIDATION-COMMANDS.md` |
| [azure-devops-cli](../skills/azure-devops-cli/SKILL.md) | Manage Azure DevOps resources via CLI including projects, repos, pipelines, builds, pull requests, work items, artifacts, and service endpoints. Use when working with Azure DevOps, az commands, devops automation, CI/CD, or when user mentions Azure DevOps CLI. | None |
+| [azure-resource-health-diagnose](../skills/azure-resource-health-diagnose/SKILL.md) | Analyze Azure resource health, diagnose issues from logs and telemetry, and create a remediation plan for identified problems. | None |
| [azure-resource-visualizer](../skills/azure-resource-visualizer/SKILL.md) | Analyze Azure resource groups and generate detailed Mermaid architecture diagrams showing the relationships between individual resources. Use this skill when the user asks for a diagram of their Azure resources or help in understanding how the resources relate to each other. | `LICENSE.txt` `assets/template-architecture.md` |
| [azure-role-selector](../skills/azure-role-selector/SKILL.md) | When user is asking for guidance for which role to assign to an identity given desired permissions, this agent helps them understand the role that will meet the requirements with least privilege access and how to apply that role. | `LICENSE.txt` |
| [azure-static-web-apps](../skills/azure-static-web-apps/SKILL.md) | Helps create, configure, and deploy Azure Static Web Apps using the SWA CLI. Use when deploying static sites to Azure, setting up SWA local development, configuring staticwebapp.config.json, adding Azure Functions APIs to SWA, or setting up GitHub Actions CI/CD for Static Web Apps. | None |
+| [boost-prompt](../skills/boost-prompt/SKILL.md) | Interactive prompt refinement workflow: interrogates scope, deliverables, constraints; copies final markdown to clipboard; never writes code. Requires the Joyride extension. | None |
+| [breakdown-epic-arch](../skills/breakdown-epic-arch/SKILL.md) | Prompt for creating the high-level technical architecture for an Epic, based on a Product Requirements Document. | None |
+| [breakdown-epic-pm](../skills/breakdown-epic-pm/SKILL.md) | Prompt for creating an Epic Product Requirements Document (PRD) for a new epic. This PRD will be used as input for generating a technical architecture specification. | None |
+| [breakdown-feature-implementation](../skills/breakdown-feature-implementation/SKILL.md) | Prompt for creating detailed feature implementation plans, following Epoch monorepo structure. | None |
+| [breakdown-feature-prd](../skills/breakdown-feature-prd/SKILL.md) | Prompt for creating Product Requirements Documents (PRDs) for new features, based on an Epic. | None |
+| [breakdown-plan](../skills/breakdown-plan/SKILL.md) | Issue Planning and Automation prompt that generates comprehensive project plans with Epic > Feature > Story/Enabler > Test hierarchy, dependencies, priorities, and automated tracking. | None |
+| [breakdown-test](../skills/breakdown-test/SKILL.md) | Test Planning and Quality Assurance prompt that generates comprehensive test strategies, task breakdowns, and quality validation plans for GitHub projects. | None |
+| [centos-linux-triage](../skills/centos-linux-triage/SKILL.md) | Triage and resolve CentOS issues using RHEL-compatible tooling, SELinux-aware practices, and firewalld. | None |
| [chrome-devtools](../skills/chrome-devtools/SKILL.md) | Expert-level browser automation, debugging, and performance analysis using Chrome DevTools MCP. Use for interacting with web pages, capturing screenshots, analyzing network traffic, and profiling performance. | None |
+| [code-exemplars-blueprint-generator](../skills/code-exemplars-blueprint-generator/SKILL.md) | Technology-agnostic prompt generator that creates customizable AI prompts for scanning codebases and identifying high-quality code exemplars. Supports multiple programming languages (.NET, Java, JavaScript, TypeScript, React, Angular, Python) with configurable analysis depth, categorization methods, and documentation formats to establish coding standards and maintain consistency across development teams. | None |
+| [comment-code-generate-a-tutorial](../skills/comment-code-generate-a-tutorial/SKILL.md) | Transform this Python script into a polished, beginner-friendly project by refactoring the code, adding clear instructional comments, and generating a complete markdown tutorial. | None |
+| [containerize-aspnet-framework](../skills/containerize-aspnet-framework/SKILL.md) | Containerize an ASP.NET (.NET Framework) project by creating Dockerfile and .dockerignore files customized for the project. | None |
+| [containerize-aspnetcore](../skills/containerize-aspnetcore/SKILL.md) | Containerize an ASP.NET Core project by creating Dockerfile and .dockerignore files customized for the project. | None |
+| [context-map](../skills/context-map/SKILL.md) | Generate a map of all files relevant to a task before making changes | None |
+| [conventional-commit](../skills/conventional-commit/SKILL.md) | Prompt and workflow for generating conventional commit messages using a structured XML format. Guides users to create standardized, descriptive commit messages in line with the Conventional Commits specification, including instructions, examples, and validation. | None |
+| [convert-plaintext-to-md](../skills/convert-plaintext-to-md/SKILL.md) | Convert a text-based document to markdown following instructions from the prompt, or, if a documented option is passed, follow the instructions for that option. | None |
| [copilot-cli-quickstart](../skills/copilot-cli-quickstart/SKILL.md) | Use this skill when someone wants to learn GitHub Copilot CLI from scratch. Offers interactive step-by-step tutorials with separate Developer and Non-Developer tracks, plus on-demand Q&A. Just say "start tutorial" or ask a question! Note: This skill targets GitHub Copilot CLI specifically and uses CLI-specific tools (ask_user, sql, fetch_copilot_cli_documentation). | None |
+| [copilot-instructions-blueprint-generator](../skills/copilot-instructions-blueprint-generator/SKILL.md) | Technology-agnostic blueprint generator for creating comprehensive copilot-instructions.md files that guide GitHub Copilot to produce code consistent with project standards, architecture patterns, and exact technology versions by analyzing existing codebase patterns and avoiding assumptions. | None |
| [copilot-sdk](../skills/copilot-sdk/SKILL.md) | Build agentic applications with GitHub Copilot SDK. Use when embedding AI agents in apps, creating custom tools, implementing streaming responses, managing sessions, connecting to MCP servers, or creating custom agents. Triggers on Copilot SDK, GitHub SDK, agentic app, embed Copilot, programmable agent, MCP server, custom agent. | None |
+| [cosmosdb-datamodeling](../skills/cosmosdb-datamodeling/SKILL.md) | Step-by-step guide for capturing key application requirements for a NoSQL use case and producing an Azure Cosmos DB NoSQL data model design using best practices and common patterns. Produces a "cosmosdb_requirements.md" file and a "cosmosdb_data_model.md" file. | None |
+| [create-agentsmd](../skills/create-agentsmd/SKILL.md) | Prompt for generating an AGENTS.md file for a repository | None |
+| [create-architectural-decision-record](../skills/create-architectural-decision-record/SKILL.md) | Create an Architectural Decision Record (ADR) document for AI-optimized decision documentation. | None |
+| [create-github-action-workflow-specification](../skills/create-github-action-workflow-specification/SKILL.md) | Create a formal specification for an existing GitHub Actions CI/CD workflow, optimized for AI consumption and workflow maintenance. | None |
+| [create-github-issue-feature-from-specification](../skills/create-github-issue-feature-from-specification/SKILL.md) | Create GitHub Issue for feature request from specification file using feature_request.yml template. | None |
+| [create-github-issues-feature-from-implementation-plan](../skills/create-github-issues-feature-from-implementation-plan/SKILL.md) | Create GitHub Issues from implementation plan phases using feature_request.yml or chore_request.yml templates. | None |
+| [create-github-issues-for-unmet-specification-requirements](../skills/create-github-issues-for-unmet-specification-requirements/SKILL.md) | Create GitHub Issues for unimplemented requirements from specification files using feature_request.yml template. | None |
+| [create-github-pull-request-from-specification](../skills/create-github-pull-request-from-specification/SKILL.md) | Create GitHub Pull Request for feature request from specification file using pull_request_template.md template. | None |
+| [create-implementation-plan](../skills/create-implementation-plan/SKILL.md) | Create a new implementation plan file for new features, refactoring existing code, upgrading packages, or changes to design, architecture, or infrastructure. | None |
+| [create-llms](../skills/create-llms/SKILL.md) | Create an llms.txt file from scratch based on repository structure following the llms.txt specification at https://llmstxt.org/ | None |
+| [create-oo-component-documentation](../skills/create-oo-component-documentation/SKILL.md) | Create comprehensive, standardized documentation for object-oriented components following industry best practices and architectural documentation standards. | None |
+| [create-readme](../skills/create-readme/SKILL.md) | Create a README.md file for the project | None |
+| [create-specification](../skills/create-specification/SKILL.md) | Create a new specification file for the solution, optimized for Generative AI consumption. | None |
+| [create-spring-boot-java-project](../skills/create-spring-boot-java-project/SKILL.md) | Create Spring Boot Java Project Skeleton | None |
+| [create-spring-boot-kotlin-project](../skills/create-spring-boot-kotlin-project/SKILL.md) | Create Spring Boot Kotlin Project Skeleton | None |
+| [create-technical-spike](../skills/create-technical-spike/SKILL.md) | Create time-boxed technical spike documents for researching and resolving critical development decisions before implementation. | None |
+| [create-tldr-page](../skills/create-tldr-page/SKILL.md) | Create a tldr page from documentation URLs and command examples, requiring both URL and command name. | None |
| [create-web-form](../skills/create-web-form/SKILL.md) | Create robust, accessible web forms with best practices for HTML structure, CSS styling, JavaScript interactivity, form validation, and server-side processing. Use when asked to "create a form", "build a web form", "add a contact form", "make a signup form", or when building any HTML form with data handling. Covers PHP and Python backends, MySQL database integration, REST APIs, XML data exchange, accessibility (ARIA), and progressive web apps. | `references/accessibility.md` `references/aria-form-role.md` `references/css-styling.md` `references/form-basics.md` `references/form-controls.md` `references/form-data-handling.md` `references/html-form-elements.md` `references/html-form-example.md` `references/hypertext-transfer-protocol.md` `references/javascript.md` `references/php-cookies.md` `references/php-forms.md` `references/php-json.md` `references/php-mysql-database.md` `references/progressive-web-app.md` `references/python-as-web-framework.md` `references/python-contact-form.md` `references/python-flask-app.md` `references/python-flask.md` `references/security.md` `references/styling-web-forms.md` `references/web-api.md` `references/web-performance.md` `references/xml.md` |
+| [csharp-async](../skills/csharp-async/SKILL.md) | Get best practices for C# async programming | None |
+| [csharp-docs](../skills/csharp-docs/SKILL.md) | Ensure that C# types are documented with XML comments and follow best practices for documentation. | None |
+| [csharp-mcp-server-generator](../skills/csharp-mcp-server-generator/SKILL.md) | Generate a complete MCP server project in C# with tools, prompts, and proper configuration | None |
+| [csharp-mstest](../skills/csharp-mstest/SKILL.md) | Get best practices for MSTest 3.x/4.x unit testing, including modern assertion APIs and data-driven tests | None |
+| [csharp-nunit](../skills/csharp-nunit/SKILL.md) | Get best practices for NUnit unit testing, including data-driven tests | None |
+| [csharp-tunit](../skills/csharp-tunit/SKILL.md) | Get best practices for TUnit unit testing, including data-driven tests | None |
+| [csharp-xunit](../skills/csharp-xunit/SKILL.md) | Get best practices for XUnit unit testing, including data-driven tests | None |
+| [dataverse-python-advanced-patterns](../skills/dataverse-python-advanced-patterns/SKILL.md) | Generate production code for Dataverse SDK using advanced patterns, error handling, and optimization techniques. | None |
+| [dataverse-python-production-code](../skills/dataverse-python-production-code/SKILL.md) | Generate production-ready Python code using Dataverse SDK with error handling, optimization, and best practices | None |
+| [dataverse-python-quickstart](../skills/dataverse-python-quickstart/SKILL.md) | Generate Python SDK setup + CRUD + bulk + paging snippets using official patterns. | None |
+| [dataverse-python-usecase-builder](../skills/dataverse-python-usecase-builder/SKILL.md) | Generate complete solutions for specific Dataverse SDK use cases with architecture recommendations | None |
+| [debian-linux-triage](../skills/debian-linux-triage/SKILL.md) | Triage and resolve Debian Linux issues with apt, systemd, and AppArmor-aware guidance. | None |
+| [declarative-agents](../skills/declarative-agents/SKILL.md) | Complete development kit for Microsoft 365 Copilot declarative agents with three comprehensive workflows (basic, advanced, validation), TypeSpec support, and Microsoft 365 Agents Toolkit integration | None |
+| [devops-rollout-plan](../skills/devops-rollout-plan/SKILL.md) | Generate comprehensive rollout plans with preflight checks, step-by-step deployment, verification signals, rollback procedures, and communication plans for infrastructure and application changes | None |
+| [documentation-writer](../skills/documentation-writer/SKILL.md) | Diátaxis Documentation Expert. An expert technical writer specializing in creating high-quality software documentation, guided by the principles and structure of the Diátaxis technical documentation authoring framework. | None |
+| [dotnet-best-practices](../skills/dotnet-best-practices/SKILL.md) | Ensure .NET/C# code meets best practices for the solution/project. | None |
+| [dotnet-design-pattern-review](../skills/dotnet-design-pattern-review/SKILL.md) | Review the C#/.NET code for design pattern implementation and suggest improvements. | None |
+| [dotnet-upgrade](../skills/dotnet-upgrade/SKILL.md) | Ready-to-use prompts for comprehensive .NET framework upgrade analysis and execution | None |
+| [editorconfig](../skills/editorconfig/SKILL.md) | Generates a comprehensive and best-practice-oriented .editorconfig file based on project analysis and user preferences. | None |
+| [ef-core](../skills/ef-core/SKILL.md) | Get best practices for Entity Framework Core | None |
| [excalidraw-diagram-generator](../skills/excalidraw-diagram-generator/SKILL.md) | Generate Excalidraw diagrams from natural language descriptions. Use when asked to "create a diagram", "make a flowchart", "visualize a process", "draw a system architecture", "create a mind map", or "generate an Excalidraw file". Supports flowcharts, relationship diagrams, mind maps, and system architecture diagrams. Outputs .excalidraw JSON files that can be opened directly in Excalidraw. | `references/element-types.md` `references/excalidraw-schema.md` `scripts/.gitignore` `scripts/README.md` `scripts/add-arrow.py` `scripts/add-icon-to-diagram.py` `scripts/split-excalidraw-library.py` `templates/business-flow-swimlane-template.excalidraw` `templates/class-diagram-template.excalidraw` `templates/data-flow-diagram-template.excalidraw` `templates/er-diagram-template.excalidraw` `templates/flowchart-template.excalidraw` `templates/mindmap-template.excalidraw` `templates/relationship-template.excalidraw` `templates/sequence-diagram-template.excalidraw` |
| [fabric-lakehouse](../skills/fabric-lakehouse/SKILL.md) | Use this skill to get context about Fabric Lakehouse and its features for software systems and AI-powered functions. It offers descriptions of Lakehouse data components, organization with schemas and shortcuts, access control, and code examples. This skill supports users in designing, building, and optimizing Lakehouse solutions using best practices. | `references/getdata.md` `references/pyspark.md` |
+| [fedora-linux-triage](../skills/fedora-linux-triage/SKILL.md) | Triage and resolve Fedora issues with dnf, systemd, and SELinux-aware guidance. | None |
+| [finalize-agent-prompt](../skills/finalize-agent-prompt/SKILL.md) | Finalize prompt file using the role of an AI agent to polish the prompt for the end user. | None |
| [finnish-humanizer](../skills/finnish-humanizer/SKILL.md) | Detect and remove AI-generated markers from Finnish text, making it sound like a native Finnish speaker wrote it. Use when asked to "humanize", "naturalize", or "remove AI feel" from Finnish text, or when editing .md/.txt files containing Finnish content. Identifies 26 patterns (12 Finnish-specific + 14 universal) and 4 style markers. | `references/patterns.md` |
+| [first-ask](../skills/first-ask/SKILL.md) | Interactive, input-tool-powered task refinement workflow: interrogates scope, deliverables, and constraints before carrying out the task. Requires the Joyride extension. | None |
| [fluentui-blazor](../skills/fluentui-blazor/SKILL.md) | Guide for using the Microsoft Fluent UI Blazor component library (Microsoft.FluentUI.AspNetCore.Components NuGet package) in Blazor applications. Use this when the user is building a Blazor app with Fluent UI components, setting up the library, using FluentUI components like FluentButton, FluentDataGrid, FluentDialog, FluentToast, FluentNavMenu, FluentTextField, FluentSelect, FluentAutocomplete, FluentDesignTheme, or any component prefixed with "Fluent". Also use when troubleshooting missing providers, JS interop issues, or theming. | `references/DATAGRID.md` `references/LAYOUT-AND-NAVIGATION.md` `references/SETUP.md` `references/THEMING.md` |
+| [folder-structure-blueprint-generator](../skills/folder-structure-blueprint-generator/SKILL.md) | Comprehensive technology-agnostic prompt for analyzing and documenting project folder structures. Auto-detects project types (.NET, Java, React, Angular, Python, Node.js, Flutter), generates detailed blueprints with visualization options, naming conventions, file placement patterns, and extension templates for maintaining consistent code organization across diverse technology stacks. | None |
+| [gen-specs-as-issues](../skills/gen-specs-as-issues/SKILL.md) | This workflow guides you through a systematic approach to identify missing features, prioritize them, and create detailed specifications for implementation. | None |
+| [generate-custom-instructions-from-codebase](../skills/generate-custom-instructions-from-codebase/SKILL.md) | Migration and code evolution instructions generator for GitHub Copilot. Analyzes differences between two project versions (branches, commits, or releases) to create precise instructions allowing Copilot to maintain consistency during technology migrations, major refactoring, or framework version upgrades. | None |
| [gh-cli](../skills/gh-cli/SKILL.md) | GitHub CLI (gh) comprehensive reference for repositories, issues, pull requests, Actions, projects, releases, gists, codespaces, organizations, extensions, and all GitHub operations from the command line. | None |
| [git-commit](../skills/git-commit/SKILL.md) | Execute git commit with conventional commit message analysis, intelligent staging, and message generation. Use when user asks to commit changes, create a git commit, or mentions "/commit". Supports: (1) Auto-detecting type and scope from changes, (2) Generating conventional commit messages from diff, (3) Interactive commit with optional type/scope/description overrides, (4) Intelligent file staging for logical grouping | None |
+| [git-flow-branch-creator](../skills/git-flow-branch-creator/SKILL.md) | Intelligent Git Flow branch creator that analyzes git status/diff and creates appropriate branches following the nvie Git Flow branching model. | None |
+| [github-copilot-starter](../skills/github-copilot-starter/SKILL.md) | Set up complete GitHub Copilot configuration for a new project based on technology stack | None |
| [github-issues](../skills/github-issues/SKILL.md) | Create, update, and manage GitHub issues using MCP tools. Use this skill when users want to create bug reports, feature requests, or task issues, update existing issues, add labels/assignees/milestones, or manage issue workflows. Triggers on requests like "create an issue", "file a bug", "request a feature", "update issue X", or any GitHub issue management task. | `references/templates.md` |
+| [go-mcp-server-generator](../skills/go-mcp-server-generator/SKILL.md) | Generate a complete Go MCP server project with proper structure, dependencies, and implementation using the official github.com/modelcontextprotocol/go-sdk. | None |
| [image-manipulation-image-magick](../skills/image-manipulation-image-magick/SKILL.md) | Process and manipulate images using ImageMagick. Supports resizing, format conversion, batch processing, and retrieving image metadata. Use when working with images, creating thumbnails, resizing wallpapers, or performing batch image operations. | None |
+| [java-add-graalvm-native-image-support](../skills/java-add-graalvm-native-image-support/SKILL.md) | GraalVM Native Image expert that adds native image support to Java applications, builds the project, analyzes build errors, applies fixes, and iterates until successful compilation using Oracle best practices. | None |
+| [java-docs](../skills/java-docs/SKILL.md) | Ensure that Java types are documented with Javadoc comments and follow best practices for documentation. | None |
+| [java-junit](../skills/java-junit/SKILL.md) | Get best practices for JUnit 5 unit testing, including data-driven tests | None |
+| [java-mcp-server-generator](../skills/java-mcp-server-generator/SKILL.md) | Generate a complete Model Context Protocol server project in Java using the official MCP Java SDK with reactive streams and optional Spring Boot integration. | None |
+| [java-refactoring-extract-method](../skills/java-refactoring-extract-method/SKILL.md) | Refactoring using Extract Method in the Java language | None |
+| [java-refactoring-remove-parameter](../skills/java-refactoring-remove-parameter/SKILL.md) | Refactoring using Remove Parameter in the Java language | None |
+| [java-springboot](../skills/java-springboot/SKILL.md) | Get best practices for developing applications with Spring Boot. | None |
+| [javascript-typescript-jest](../skills/javascript-typescript-jest/SKILL.md) | Best practices for writing JavaScript/TypeScript tests using Jest, including mocking strategies, test structure, and common patterns. | None |
+| [kotlin-mcp-server-generator](../skills/kotlin-mcp-server-generator/SKILL.md) | Generate a complete Kotlin MCP server project with proper structure, dependencies, and implementation using the official io.modelcontextprotocol:kotlin-sdk library. | None |
+| [kotlin-springboot](../skills/kotlin-springboot/SKILL.md) | Get best practices for developing applications with Spring Boot and Kotlin. | None |
| [legacy-circuit-mockups](../skills/legacy-circuit-mockups/SKILL.md) | Generate breadboard circuit mockups and visual diagrams using HTML5 Canvas drawing techniques. Use when asked to create circuit layouts, visualize electronic component placements, draw breadboard diagrams, mockup 6502 builds, generate retro computer schematics, or design vintage electronics projects. Supports 555 timers, W65C02S microprocessors, 28C256 EEPROMs, W65C22 VIA chips, 7400-series logic gates, LEDs, resistors, capacitors, switches, buttons, crystals, and wires. | `references/28256-eeprom.md` `references/555.md` `references/6502.md` `references/6522.md` `references/6C62256.md` `references/7400-series.md` `references/assembly-compiler.md` `references/assembly-language.md` `references/basic-electronic-components.md` `references/breadboard.md` `references/common-breadboard-components.md` `references/connecting-electronic-components.md` `references/emulator-28256-eeprom.md` `references/emulator-6502.md` `references/emulator-6522.md` `references/emulator-6C62256.md` `references/emulator-lcd.md` `references/lcd.md` `references/minipro.md` `references/t48eeprom-programmer.md` |
| [make-repo-contribution](../skills/make-repo-contribution/SKILL.md) | All changes to code must follow the guidance documented in the repository. Before any issue is filed, branch is made, commits generated, or pull request (or PR) created, a search must be done to ensure the right steps are followed. Whenever asked to create an issue, commit messages, to push code, or create a PR, use this skill so everything is done correctly. | `assets/issue-template.md` `assets/pr-template.md` |
| [make-skill-template](../skills/make-skill-template/SKILL.md) | Create new Agent Skills for GitHub Copilot from prompts or by duplicating this template. Use when asked to "create a skill", "make a new skill", "scaffold a skill", or when building specialized AI capabilities with bundled resources. Generates SKILL.md files with proper frontmatter, directory structure, and optional scripts/references/assets folders. | None |
| [markdown-to-html](../skills/markdown-to-html/SKILL.md) | Convert Markdown files to HTML similar to `marked.js`, `pandoc`, `gomarkdown/markdown`, or similar tools; or write a custom script to convert markdown to HTML and/or work on web template systems like `jekyll/jekyll`, `gohugoio/hugo`, or similar web templating systems that utilize markdown documents, converting them to HTML. Use when asked to "convert markdown to html", "transform md to html", "render markdown", "generate html from markdown", or when working with .md files and/or a web templating system that converts markdown to HTML output. Supports CLI and Node.js workflows with GFM, CommonMark, and standard Markdown flavors. | `references/basic-markdown-to-html.md` `references/basic-markdown.md` `references/code-blocks-to-html.md` `references/code-blocks.md` `references/collapsed-sections-to-html.md` `references/collapsed-sections.md` `references/gomarkdown.md` `references/hugo.md` `references/jekyll.md` `references/marked.md` `references/pandoc.md` `references/tables-to-html.md` `references/tables.md` `references/writing-mathematical-expressions-to-html.md` `references/writing-mathematical-expressions.md` |
| [mcp-cli](../skills/mcp-cli/SKILL.md) | Interface for MCP (Model Context Protocol) servers via CLI. Use when you need to interact with external tools, APIs, or data sources through MCP servers, list available MCP servers/tools, or call MCP tools from command line. | None |
+| [mcp-copilot-studio-server-generator](../skills/mcp-copilot-studio-server-generator/SKILL.md) | Generate a complete MCP server implementation optimized for Copilot Studio integration with proper schema constraints and streamable HTTP support | None |
+| [mcp-create-adaptive-cards](../skills/mcp-create-adaptive-cards/SKILL.md) | Skill converted from mcp-create-adaptive-cards.prompt.md | None |
+| [mcp-create-declarative-agent](../skills/mcp-create-declarative-agent/SKILL.md) | Skill converted from mcp-create-declarative-agent.prompt.md | None |
+| [mcp-deploy-manage-agents](../skills/mcp-deploy-manage-agents/SKILL.md) | Skill converted from mcp-deploy-manage-agents.prompt.md | None |
| [meeting-minutes](../skills/meeting-minutes/SKILL.md) | Generate concise, actionable meeting minutes for internal meetings. Includes metadata, attendees, agenda, decisions, action items (owner + due date), and follow-up steps. | None |
+| [memory-merger](../skills/memory-merger/SKILL.md) | Merges mature lessons from a domain memory file into its instruction file. Syntax: `/memory-merger >domain [scope]` where scope is `global` (default), `user`, `workspace`, or `ws`. | None |
| [microsoft-code-reference](../skills/microsoft-code-reference/SKILL.md) | Look up Microsoft API references, find working code samples, and verify SDK code is correct. Use when working with Azure SDKs, .NET libraries, or Microsoft APIs: to find the right method, check parameters, get working examples, or troubleshoot errors. Catches hallucinated methods, wrong signatures, and deprecated patterns by querying official docs. | None |
| [microsoft-docs](../skills/microsoft-docs/SKILL.md) | Query official Microsoft documentation to find concepts, tutorials, and code examples across Azure, .NET, Agent Framework, Aspire, VS Code, GitHub, and more. Uses Microsoft Learn MCP as the default, with Context7 and Aspire MCP for content that lives outside learn.microsoft.com. | None |
| [microsoft-skill-creator](../skills/microsoft-skill-creator/SKILL.md) | Create agent skills for Microsoft technologies using Learn MCP tools. Use when users want to create a skill that teaches agents about any Microsoft technology, library, framework, or service (Azure, .NET, M365, VS Code, Bicep, etc.). Investigates topics deeply, then generates a hybrid skill storing essential knowledge locally while enabling dynamic deeper investigation. | `references/skill-templates.md` |
+| [mkdocs-translations](../skills/mkdocs-translations/SKILL.md) | Generate a language translation for a mkdocs documentation stack. | None |
+| [model-recommendation](../skills/model-recommendation/SKILL.md) | Analyze chatmode or prompt files and recommend optimal AI models based on task complexity, required capabilities, and cost-efficiency | None |
+| [multi-stage-dockerfile](../skills/multi-stage-dockerfile/SKILL.md) | Create optimized multi-stage Dockerfiles for any language or framework | None |
+| [my-issues](../skills/my-issues/SKILL.md) | List my issues in the current repository | None |
+| [my-pull-requests](../skills/my-pull-requests/SKILL.md) | List my pull requests in the current repository | None |
| [nano-banana-pro-openrouter](../skills/nano-banana-pro-openrouter/SKILL.md) | Generate or edit images via OpenRouter with the Gemini 3 Pro Image model. Use for prompt-only image generation, image edits, and multi-image compositing; supports 1K/2K/4K output. | `assets/SYSTEM_TEMPLATE` `scripts/generate_image.py` |
+| [next-intl-add-language](../skills/next-intl-add-language/SKILL.md) | Add new language to a Next.js + next-intl application | None |
| [nuget-manager](../skills/nuget-manager/SKILL.md) | Manage NuGet packages in .NET projects/solutions. Use this skill when adding, removing, or updating NuGet package versions. It enforces using `dotnet` CLI for package management and provides strict procedures for direct file edits only when updating versions. | None |
+| [openapi-to-application-code](../skills/openapi-to-application-code/SKILL.md) | Generate a complete, production-ready application from an OpenAPI specification | None |
| [pdftk-server](../skills/pdftk-server/SKILL.md) | Skill for using the command-line tool pdftk (PDFtk Server) for working with PDF files. Use when asked to merge PDFs, split PDFs, rotate pages, encrypt or decrypt PDFs, fill PDF forms, apply watermarks, stamp overlays, extract metadata, burst documents into pages, repair corrupted PDFs, attach or extract files, or perform any PDF manipulation from the command line. | `references/download.md` `references/pdftk-cli-examples.md` `references/pdftk-man-page.md` `references/pdftk-server-license.md` `references/third-party-materials.md` |
| [penpot-uiux-design](../skills/penpot-uiux-design/SKILL.md) | Comprehensive guide for creating professional UI/UX designs in Penpot using MCP tools. Use this skill when: (1) Creating new UI/UX designs for web, mobile, or desktop applications, (2) Building design systems with components and tokens, (3) Designing dashboards, forms, navigation, or landing pages, (4) Applying accessibility standards and best practices, (5) Following platform guidelines (iOS, Android, Material Design), (6) Reviewing or improving existing Penpot designs for usability. Triggers: "design a UI", "create interface", "build layout", "design dashboard", "create form", "design landing page", "make it accessible", "design system", "component library". | `references/accessibility.md` `references/component-patterns.md` `references/platform-guidelines.md` `references/setup-troubleshooting.md` |
+| [php-mcp-server-generator](../skills/php-mcp-server-generator/SKILL.md) | Generate a complete PHP Model Context Protocol server project with tools, resources, prompts, and tests using the official PHP SDK | None |
| [plantuml-ascii](../skills/plantuml-ascii/SKILL.md) | Generate ASCII art diagrams using PlantUML text mode. Use when user asks to create ASCII diagrams, text-based diagrams, terminal-friendly diagrams, or mentions plantuml ascii, text diagram, ascii art diagram. Supports: Converting PlantUML diagrams to ASCII art, Creating sequence diagrams, class diagrams, flowcharts in ASCII format, Generating Unicode-enhanced ASCII art with -utxt flag | None |
+| [playwright-automation-fill-in-form](../skills/playwright-automation-fill-in-form/SKILL.md) | Automate filling in a form using Playwright MCP | None |
+| [playwright-explore-website](../skills/playwright-explore-website/SKILL.md) | Website exploration for testing using Playwright MCP | None |
+| [playwright-generate-test](../skills/playwright-generate-test/SKILL.md) | Generate a Playwright test based on a scenario using Playwright MCP | None |
| [polyglot-test-agent](../skills/polyglot-test-agent/SKILL.md) | Generates comprehensive, workable unit tests for any programming language using a multi-agent pipeline. Use when asked to generate tests, write unit tests, improve test coverage, add test coverage, create test files, or test a codebase. Supports C#, TypeScript, JavaScript, Python, Go, Rust, Java, and more. Orchestrates research, planning, and implementation phases to produce tests that compile, pass, and follow project conventions. | `unit-test-generation.prompt.md` |
+| [postgresql-code-review](../skills/postgresql-code-review/SKILL.md) | PostgreSQL-specific code review assistant focusing on PostgreSQL best practices, anti-patterns, and unique quality standards. Covers JSONB operations, array usage, custom types, schema design, function optimization, and PostgreSQL-exclusive security features like Row Level Security (RLS). | None |
+| [postgresql-optimization](../skills/postgresql-optimization/SKILL.md) | PostgreSQL-specific development assistant focusing on unique PostgreSQL features, advanced data types, and PostgreSQL-exclusive capabilities. Covers JSONB operations, array types, custom types, range/geometric types, full-text search, window functions, and PostgreSQL extensions ecosystem. | None |
+| [power-apps-code-app-scaffold](../skills/power-apps-code-app-scaffold/SKILL.md) | Scaffold a complete Power Apps Code App project with PAC CLI setup, SDK integration, and connector configuration | None |
+| [power-bi-dax-optimization](../skills/power-bi-dax-optimization/SKILL.md) | Comprehensive Power BI DAX formula optimization prompt for improving performance, readability, and maintainability of DAX calculations. | None |
+| [power-bi-model-design-review](../skills/power-bi-model-design-review/SKILL.md) | Comprehensive Power BI data model design review prompt for evaluating model architecture, relationships, and optimization opportunities. | None |
+| [power-bi-performance-troubleshooting](../skills/power-bi-performance-troubleshooting/SKILL.md) | Systematic Power BI performance troubleshooting prompt for identifying, diagnosing, and resolving performance issues in Power BI models, reports, and queries. | None |
+| [power-bi-report-design-consultation](../skills/power-bi-report-design-consultation/SKILL.md) | Power BI report visualization design prompt for creating effective, user-friendly, and accessible reports with optimal chart selection and layout design. | None |
+| [power-platform-mcp-connector-suite](../skills/power-platform-mcp-connector-suite/SKILL.md) | Generate complete Power Platform custom connector with MCP integration for Copilot Studio - includes schema generation, troubleshooting, and validation | None |
| [powerbi-modeling](../skills/powerbi-modeling/SKILL.md) | Power BI semantic modeling assistant for building optimized data models. Use when working with Power BI semantic models, creating measures, designing star schemas, configuring relationships, implementing RLS, or optimizing model performance. Triggers on queries about DAX calculations, table relationships, dimension/fact table design, naming conventions, model documentation, cardinality, cross-filter direction, calculation groups, and data model best practices. Always connects to the active model first using power-bi-modeling MCP tools to understand the data structure before providing guidance. | `references/MEASURES-DAX.md` `references/PERFORMANCE.md` `references/RELATIONSHIPS.md` `references/RLS.md` `references/STAR-SCHEMA.md` |
| [prd](../skills/prd/SKILL.md) | Generate high-quality Product Requirements Documents (PRDs) for software systems and AI-powered features. Includes executive summaries, user stories, technical specifications, and risk analysis. | None |
+| [project-workflow-analysis-blueprint-generator](../skills/project-workflow-analysis-blueprint-generator/SKILL.md) | Comprehensive technology-agnostic prompt generator for documenting end-to-end application workflows. Automatically detects project architecture patterns, technology stacks, and data flow patterns to generate detailed implementation blueprints covering entry points, service layers, data access, error handling, and testing approaches across multiple technologies including .NET, Java/Spring, React, and microservices architectures. | None |
+| [prompt-builder](../skills/prompt-builder/SKILL.md) | Guide users through creating high-quality GitHub Copilot prompts with proper structure, tools, and best practices. | None |
+| [pytest-coverage](../skills/pytest-coverage/SKILL.md) | Run pytest tests with coverage, discover lines missing coverage, and increase coverage to 100%. | None |
+| [python-mcp-server-generator](../skills/python-mcp-server-generator/SKILL.md) | Generate a complete MCP server project in Python with tools, resources, and proper configuration | None |
| [quasi-coder](../skills/quasi-coder/SKILL.md) | Expert 10x engineer skill for interpreting and implementing code from shorthand, quasi-code, and natural language descriptions. Use when collaborators provide incomplete code snippets, pseudo-code, or descriptions with potential typos or incorrect terminology. Excels at translating non-technical or semi-technical descriptions into production-quality code. | None |
+| [readme-blueprint-generator](../skills/readme-blueprint-generator/SKILL.md) | Intelligent README.md generation prompt that analyzes project documentation structure and creates comprehensive repository documentation. Scans .github/copilot directory files and copilot-instructions.md to extract project information, technology stack, architecture, development workflow, coding standards, and testing approaches while generating well-structured markdown documentation with proper formatting, cross-references, and developer-focused content. | None |
| [refactor](../skills/refactor/SKILL.md) | Surgical code refactoring to improve maintainability without changing behavior. Covers extracting functions, renaming variables, breaking down god functions, improving type safety, eliminating code smells, and applying design patterns. Less drastic than repo-rebuilder; use for gradual improvements. | None |
+| [refactor-method-complexity-reduce](../skills/refactor-method-complexity-reduce/SKILL.md) | Refactor given method `${input:methodName}` to reduce its cognitive complexity to `${input:complexityThreshold}` or below, by extracting helper methods. | None |
+| [refactor-plan](../skills/refactor-plan/SKILL.md) | Plan a multi-file refactor with proper sequencing and rollback steps | None |
+| [remember](../skills/remember/SKILL.md) | Transforms lessons learned into domain-organized memory instructions (global or workspace). Syntax: `/remember [>domain [scope]] lesson clue` where scope is `global` (default), `user`, `workspace`, or `ws`. | None |
+| [remember-interactive-programming](../skills/remember-interactive-programming/SKILL.md) | A micro-prompt that reminds the agent that it is an interactive programmer. Works great in Clojure when Copilot has access to the REPL (probably via Backseat Driver). Will work with any system that has a live REPL that the agent can use. Adapt the prompt with any specific reminders in your workflow and/or workspace. | None |
+| [repo-story-time](../skills/repo-story-time/SKILL.md) | Generate a comprehensive repository summary and narrative story from commit history | None |
+| [review-and-refactor](../skills/review-and-refactor/SKILL.md) | Review and refactor code in your project according to defined instructions | None |
+| [ruby-mcp-server-generator](../skills/ruby-mcp-server-generator/SKILL.md) | Generate a complete Model Context Protocol server project in Ruby using the official MCP Ruby SDK gem. | None |
+| [rust-mcp-server-generator](../skills/rust-mcp-server-generator/SKILL.md) | Generate a complete Rust Model Context Protocol server project with tools, prompts, resources, and tests using the official rmcp SDK | None |
| [scoutqa-test](../skills/scoutqa-test/SKILL.md) | This skill should be used when the user asks to "test this website", "run exploratory testing", "check for accessibility issues", "verify the login flow works", "find bugs on this page", or requests automated QA testing. Triggers on web application testing scenarios including smoke tests, accessibility audits, e-commerce flows, and user flow validation using ScoutQA CLI. IMPORTANT: Use this skill proactively after implementing web application features to verify they work correctly - don't wait for the user to ask for testing. | None |
+| [shuffle-json-data](../skills/shuffle-json-data/SKILL.md) | Shuffle repetitive JSON objects safely by validating schema consistency before randomising entries. | None |
| [snowflake-semanticview](../skills/snowflake-semanticview/SKILL.md) | Create, alter, and validate Snowflake semantic views using Snowflake CLI (snow). Use when asked to build or troubleshoot semantic views/semantic layer definitions with CREATE/ALTER SEMANTIC VIEW, to validate semantic-view DDL against Snowflake via CLI, or to guide Snowflake CLI installation and connection setup. | None |
| [sponsor-finder](../skills/sponsor-finder/SKILL.md) | Find which of a GitHub repository's dependencies are sponsorable via GitHub Sponsors. Uses deps.dev API for dependency resolution across npm, PyPI, Cargo, Go, RubyGems, Maven, and NuGet. Checks npm funding metadata, FUNDING.yml files, and web search. Verifies every link. Shows direct and transitive dependencies with OSSF Scorecard health data. Invoke with /sponsor followed by a GitHub owner/repo (e.g. "/sponsor expressjs/express"). | None |
+| [sql-code-review](../skills/sql-code-review/SKILL.md) | Universal SQL code review assistant that performs comprehensive security, maintainability, and code quality analysis across all SQL databases (MySQL, PostgreSQL, SQL Server, Oracle). Focuses on SQL injection prevention, access control, code standards, and anti-pattern detection. Complements SQL optimization prompt for complete development coverage. | None |
+| [sql-optimization](../skills/sql-optimization/SKILL.md) | Universal SQL performance optimization assistant for comprehensive query tuning, indexing strategies, and database performance analysis across all SQL databases (MySQL, PostgreSQL, SQL Server, Oracle). Provides execution plan analysis, pagination optimization, batch operations, and performance monitoring guidance. | None |
+| [structured-autonomy-generate](../skills/structured-autonomy-generate/SKILL.md) | Structured Autonomy Implementation Generator Prompt | None |
+| [structured-autonomy-implement](../skills/structured-autonomy-implement/SKILL.md) | Structured Autonomy Implementation Prompt | None |
+| [structured-autonomy-plan](../skills/structured-autonomy-plan/SKILL.md) | Structured Autonomy Planning Prompt | None |
+| [suggest-awesome-github-copilot-agents](../skills/suggest-awesome-github-copilot-agents/SKILL.md) | Suggest relevant GitHub Copilot Custom Agents files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing custom agents in this repository, and identifying outdated agents that need updates. | None |
+| [suggest-awesome-github-copilot-instructions](../skills/suggest-awesome-github-copilot-instructions/SKILL.md) | Suggest relevant GitHub Copilot instruction files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing instructions in this repository, and identifying outdated instructions that need updates. | None |
+| [suggest-awesome-github-copilot-prompts](../skills/suggest-awesome-github-copilot-prompts/SKILL.md) | Suggest relevant GitHub Copilot prompt files from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing prompts in this repository, and identifying outdated prompts that need updates. | None |
+| [suggest-awesome-github-copilot-skills](../skills/suggest-awesome-github-copilot-skills/SKILL.md) | Suggest relevant GitHub Copilot skills from the awesome-copilot repository based on current repository context and chat history, avoiding duplicates with existing skills in this repository, and identifying outdated skills that need updates. | None |
+| [swift-mcp-server-generator](../skills/swift-mcp-server-generator/SKILL.md) | Generate a complete Model Context Protocol server project in Swift using the official MCP Swift SDK package. | None |
+| [technology-stack-blueprint-generator](../skills/technology-stack-blueprint-generator/SKILL.md) | Comprehensive technology stack blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks, programming languages, and implementation patterns across multiple platforms (.NET, Java, JavaScript, React, Python). Generates configurable blueprints with version information, licensing details, usage patterns, coding conventions, and visual diagrams. Provides implementation-ready templates and maintains architectural consistency for guided development. | None |
| [terraform-azurerm-set-diff-analyzer](../skills/terraform-azurerm-set-diff-analyzer/SKILL.md) | Analyze Terraform plan JSON output for AzureRM Provider to distinguish between false-positive diffs (order-only changes in Set-type attributes) and actual resource changes. Use when reviewing terraform plan output for Azure resources like Application Gateway, Load Balancer, Firewall, Front Door, NSG, and other resources with Set-type attributes that cause spurious diffs due to internal ordering changes. | `references/azurerm_set_attributes.json` `references/azurerm_set_attributes.md` `scripts/.gitignore` `scripts/README.md` `scripts/analyze_plan.py` |
+| [tldr-prompt](../skills/tldr-prompt/SKILL.md) | Create tldr summaries for GitHub Copilot files (prompts, agents, instructions, collections), MCP servers, or documentation from URLs and queries. | None |
| [transloadit-media-processing](../skills/transloadit-media-processing/SKILL.md) | Process media files (video, audio, images, documents) using Transloadit. Use when asked to encode video to HLS/MP4, generate thumbnails, resize or watermark images, extract audio, concatenate clips, add subtitles, OCR documents, or run any media processing pipeline. Covers 86+ processing robots for file transformation at scale. | None |
+| [typescript-mcp-server-generator](../skills/typescript-mcp-server-generator/SKILL.md) | Generate a complete MCP server project in TypeScript with tools, resources, and proper configuration | None |
+| [typespec-api-operations](../skills/typespec-api-operations/SKILL.md) | Add GET, POST, PATCH, and DELETE operations to a TypeSpec API plugin with proper routing, parameters, and adaptive cards | None |
+| [typespec-create-agent](../skills/typespec-create-agent/SKILL.md) | Generate a complete TypeSpec declarative agent with instructions, capabilities, and conversation starters for Microsoft 365 Copilot | None |
+| [typespec-create-api-plugin](../skills/typespec-create-api-plugin/SKILL.md) | Generate a TypeSpec API plugin with REST operations, authentication, and Adaptive Cards for Microsoft 365 Copilot | None |
+| [update-avm-modules-in-bicep](../skills/update-avm-modules-in-bicep/SKILL.md) | Update Azure Verified Modules (AVM) to latest versions in Bicep files. | None |
+| [update-implementation-plan](../skills/update-implementation-plan/SKILL.md) | Update an existing implementation plan file with new or updated requirements covering new features, refactoring existing code, upgrading packages, or changes to design, architecture, or infrastructure. | None |
+| [update-llms](../skills/update-llms/SKILL.md) | Update the llms.txt file in the root folder to reflect changes in documentation or specifications following the llms.txt specification at https://llmstxt.org/ | None |
+| [update-markdown-file-index](../skills/update-markdown-file-index/SKILL.md) | Update a markdown file section with an index/table of files from a specified folder. | None |
+| [update-oo-component-documentation](../skills/update-oo-component-documentation/SKILL.md) | Update existing object-oriented component documentation following industry best practices and architectural documentation standards. | None |
+| [update-specification](../skills/update-specification/SKILL.md) | Update an existing specification file for the solution, optimized for Generative AI consumption based on new requirements or updates to any existing code. | None |
| [vscode-ext-commands](../skills/vscode-ext-commands/SKILL.md) | Guidelines for contributing commands in VS Code extensions. Indicates naming convention, visibility, localization and other relevant attributes, following VS Code extension development guidelines, libraries and good practices | None |
| [vscode-ext-localization](../skills/vscode-ext-localization/SKILL.md) | Guidelines for proper localization of VS Code extensions, following VS Code extension development guidelines, libraries and good practices | None |
| [web-design-reviewer](../skills/web-design-reviewer/SKILL.md) | This skill enables visual inspection of websites running locally or remotely to identify and fix design issues. Triggers on requests like "review website design", "check the UI", "fix the layout", "find design problems". Detects issues with responsive design, accessibility, visual consistency, and layout breakage, then performs fixes at the source code level. | `references/framework-fixes.md` `references/visual-checklist.md` |
| [webapp-testing](../skills/webapp-testing/SKILL.md) | Toolkit for interacting with and testing local web applications using Playwright. Supports verifying frontend functionality, debugging UI behavior, capturing browser screenshots, and viewing browser logs. | `test-helper.js` |
+| [what-context-needed](../skills/what-context-needed/SKILL.md) | Ask Copilot what files it needs to see before answering a question | None |
| [winapp-cli](../skills/winapp-cli/SKILL.md) | Windows App Development CLI (winapp) for building, packaging, and deploying Windows applications. Use when asked to initialize Windows app projects, create MSIX packages, generate AppxManifest.xml, manage development certificates, add package identity for debugging, sign packages, or access Windows SDK build tools. Supports .NET, C++, Electron, Rust, Tauri, and cross-platform frameworks targeting Windows. | None |
| [workiq-copilot](../skills/workiq-copilot/SKILL.md) | Guides the Copilot CLI on how to use the WorkIQ CLI/MCP server to query Microsoft 365 Copilot data (emails, meetings, docs, Teams, people) for live context, summaries, and recommendations. | None |
+| [write-coding-standards-from-file](../skills/write-coding-standards-from-file/SKILL.md) | Write a coding standards document for a project using the coding styles from the file(s) and/or folder(s) passed as arguments in the prompt. | None |
diff --git a/eng/constants.mjs b/eng/constants.mjs
index 1f1e95ec..c6c5986f 100644
--- a/eng/constants.mjs
+++ b/eng/constants.mjs
@@ -21,31 +21,16 @@ Team and project-specific instructions to enhance GitHub Copilot's behavior for
- Create task-specific \`*.instructions.md\` files in your workspace's \`.github/instructions/\` folder (e.g., \`.github/instructions/my-csharp-rules.instructions.md\`)
- Instructions automatically apply to Copilot behavior once installed in your workspace`,
- promptsSection: `## 🎯 Reusable Prompts
-
-Ready-to-use prompt templates for specific development scenarios and tasks, defining prompt text with a specific mode, model, and available set of tools.`,
-
- promptsUsage: `### How to Use Reusable Prompts
-
-**To Install:**
-- Click the **VS Code** or **VS Code Insiders** install button for the prompt you want to use
-- Download the \`*.prompt.md\` file and manually add it to your prompt collection
-
-**To Run/Execute:**
-- Use \`/prompt-name\` in VS Code chat after installation
-- Run the \`Chat: Run Prompt\` command from the Command Palette
-- Hit the run button while you have a prompt file open in VS Code`,
-
pluginsSection: `## 🔌 Plugins
-Curated plugins of related prompts, agents, and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.`,
+Curated plugins of related agents and skills organized around specific themes, workflows, or use cases. Plugins can be installed directly via GitHub Copilot CLI.`,
pluginsUsage: `### How to Use Plugins
**Browse Plugins:**
- ⭐ Featured plugins are highlighted and appear at the top of the list
- Explore themed plugins that group related customizations
-- Each plugin includes prompts, agents, and skills for specific workflows
+- Each plugin includes agents and skills for specific workflows
- Plugins make it easy to adopt comprehensive toolkits for particular scenarios
**Install Plugins:**
@@ -55,7 +40,7 @@ Curated plugins of related prompts, agents, and skills organized around specific
featuredPluginsSection: `## 🌟 Featured Plugins
-Discover our curated plugins of prompts, agents, and skills organized around specific themes and workflows.`,
+Discover our curated plugins of agents and skills organized around specific themes and workflows.`,
agentsSection: `## 🤖 Custom Agents
@@ -140,14 +125,12 @@ const repoBaseUrl =
const AKA_INSTALL_URLS = {
instructions: "https://aka.ms/awesome-copilot/install/instructions",
- prompt: "https://aka.ms/awesome-copilot/install/prompt",
agent: "https://aka.ms/awesome-copilot/install/agent",
hook: "https://aka.ms/awesome-copilot/install/hook",
};
const ROOT_FOLDER = path.join(__dirname, "..");
const INSTRUCTIONS_DIR = path.join(ROOT_FOLDER, "instructions");
-const PROMPTS_DIR = path.join(ROOT_FOLDER, "prompts");
const AGENTS_DIR = path.join(ROOT_FOLDER, "agents");
const SKILLS_DIR = path.join(ROOT_FOLDER, "skills");
const HOOKS_DIR = path.join(ROOT_FOLDER, "hooks");
@@ -172,7 +155,6 @@ export {
HOOKS_DIR,
INSTRUCTIONS_DIR,
MAX_PLUGIN_ITEMS,
- PROMPTS_DIR,
repoBaseUrl,
ROOT_FOLDER,
SKILL_DESCRIPTION_MAX_LENGTH,
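For orientation, a minimal sketch of what `eng/constants.mjs` exposes after this hunk, assembled only from the lines visible above; the imports and the ESM `__dirname` derivation are assumptions, since they fall outside the hunk.

```js
import path from "node:path";
import { fileURLToPath } from "node:url";

// Assumption: the module derives __dirname this way, since ESM has no built-in.
const __dirname = path.dirname(fileURLToPath(import.meta.url));

// The `prompt` install URL is gone; the remaining entries are unchanged.
const AKA_INSTALL_URLS = {
  instructions: "https://aka.ms/awesome-copilot/install/instructions",
  agent: "https://aka.ms/awesome-copilot/install/agent",
  hook: "https://aka.ms/awesome-copilot/install/hook",
};

// PROMPTS_DIR is removed here and dropped from the export list as well.
const ROOT_FOLDER = path.join(__dirname, "..");
const INSTRUCTIONS_DIR = path.join(ROOT_FOLDER, "instructions");
const AGENTS_DIR = path.join(ROOT_FOLDER, "agents");
const SKILLS_DIR = path.join(ROOT_FOLDER, "skills");
const HOOKS_DIR = path.join(ROOT_FOLDER, "hooks");

console.log(Object.keys(AKA_INSTALL_URLS)); // ["instructions", "agent", "hook"]
```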
diff --git a/eng/materialize-plugins.mjs b/eng/materialize-plugins.mjs
index 44b90510..05d665f0 100644
--- a/eng/materialize-plugins.mjs
+++ b/eng/materialize-plugins.mjs
@@ -26,7 +26,6 @@ function copyDirRecursive(src, dest) {
* Resolve a plugin-relative path to the repo-root source file.
*
 *    ./agents/foo.md   → ROOT/agents/foo.agent.md
- *    ./commands/bar.md → ROOT/prompts/bar.prompt.md
 *    ./skills/baz/     → ROOT/skills/baz/
*/
function resolveSource(relPath) {
@@ -34,9 +33,6 @@ function resolveSource(relPath) {
if (relPath.startsWith("./agents/")) {
return path.join(ROOT_FOLDER, "agents", `${basename}.agent.md`);
}
- if (relPath.startsWith("./commands/")) {
- return path.join(ROOT_FOLDER, "prompts", `${basename}.prompt.md`);
- }
if (relPath.startsWith("./skills/")) {
// Strip trailing slash and get the skill folder name
const skillName = relPath.replace(/^\.\/skills\//, "").replace(/\/$/, "");
@@ -59,7 +55,6 @@ function materializePlugins() {
.sort();
let totalAgents = 0;
- let totalCommands = 0;
let totalSkills = 0;
let warnings = 0;
let errors = 0;
@@ -104,27 +99,6 @@ function materializePlugins() {
}
}
- // Process commands
- if (Array.isArray(metadata.commands)) {
- for (const relPath of metadata.commands) {
- const src = resolveSource(relPath);
- if (!src) {
- console.warn(` ⚠ ${pluginName}: Unknown path format: ${relPath}`);
- warnings++;
- continue;
- }
- if (!fs.existsSync(src)) {
- console.warn(` ⚠ ${pluginName}: Source not found: ${src}`);
- warnings++;
- continue;
- }
- const dest = path.join(pluginPath, relPath.replace(/^\.\//, ""));
- fs.mkdirSync(path.dirname(dest), { recursive: true });
- fs.copyFileSync(src, dest);
- totalCommands++;
- }
- }
-
// Process skills
if (Array.isArray(metadata.skills)) {
for (const relPath of metadata.skills) {
@@ -147,14 +121,13 @@ function materializePlugins() {
const counts = [];
if (metadata.agents?.length) counts.push(`${metadata.agents.length} agents`);
- if (metadata.commands?.length) counts.push(`${metadata.commands.length} commands`);
if (metadata.skills?.length) counts.push(`${metadata.skills.length} skills`);
if (counts.length) {
console.log(`✅ ${pluginName}: ${counts.join(", ")}`);
}
}
- console.log(`\nDone. Copied ${totalAgents} agents, ${totalCommands} commands, ${totalSkills} skills.`);
+ console.log(`\nDone. Copied ${totalAgents} agents, ${totalSkills} skills.`);
if (warnings > 0) {
console.log(`${warnings} warning(s).`);
}
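Taken together, the hunks above leave `resolveSource` handling only agent files and skill folders. A minimal sketch of the resulting function, assuming the undiffed opening derives `basename` from the relative path (the `null` fallback matches the "Unknown path format" warning in the caller):

```js
import path from "node:path";

// Stand-in for the real constant from eng/constants.mjs.
const ROOT_FOLDER = process.cwd();

// After this change, plugin-relative paths resolve to agents or skills only.
function resolveSource(relPath) {
  // Assumption: the real function derives basename like this in its undiffed opening.
  const basename = path.basename(relPath, ".md");
  if (relPath.startsWith("./agents/")) {
    return path.join(ROOT_FOLDER, "agents", `${basename}.agent.md`);
  }
  if (relPath.startsWith("./skills/")) {
    // Strip trailing slash and get the skill folder name.
    const skillName = relPath.replace(/^\.\/skills\//, "").replace(/\/$/, "");
    return path.join(ROOT_FOLDER, "skills", skillName);
  }
  return null; // callers warn "Unknown path format" on a falsy result
}

console.log(resolveSource("./agents/foo.md")); // <root>/agents/foo.agent.md
console.log(resolveSource("./skills/baz/"));   // <root>/skills/baz
```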
diff --git a/eng/update-readme.mjs b/eng/update-readme.mjs
index f14a0bc0..0194bca8 100644
--- a/eng/update-readme.mjs
+++ b/eng/update-readme.mjs
@@ -10,7 +10,6 @@ import {
HOOKS_DIR,
INSTRUCTIONS_DIR,
PLUGINS_DIR,
- PROMPTS_DIR,
repoBaseUrl,
ROOT_FOLDER,
SKILLS_DIR,
@@ -341,63 +340,6 @@ function generateInstructionsSection(instructionsDir) {
return `${TEMPLATES.instructionsSection}\n${TEMPLATES.instructionsUsage}\n\n${instructionsContent}`;
}
-/**
- * Generate the prompts section with a table of all prompts
- */
-function generatePromptsSection(promptsDir) {
- // Check if directory exists
- if (!fs.existsSync(promptsDir)) {
- return "";
- }
-
- // Get all prompt files
- const promptFiles = fs
- .readdirSync(promptsDir)
- .filter((file) => file.endsWith(".prompt.md"));
-
- // Map prompt files to objects with title for sorting
- const promptEntries = promptFiles.map((file) => {
- const filePath = path.join(promptsDir, file);
- const title = extractTitle(filePath);
- return { file, filePath, title };
- });
-
- // Sort by title alphabetically
- promptEntries.sort((a, b) => a.title.localeCompare(b.title));
-
- console.log(`Found ${promptEntries.length} prompt files`);
-
- // Return empty string if no files found
- if (promptEntries.length === 0) {
- return "";
- }
-
- // Create table header
- let promptsContent = "| Title | Description |\n| ----- | ----------- |\n";
-
- // Generate table rows for each prompt file
- for (const entry of promptEntries) {
- const { file, filePath, title } = entry;
- const link = encodeURI(`prompts/${file}`);
-
- // Check if there's a description in the frontmatter
- const customDescription = extractDescription(filePath);
-
- // Create badges for installation links
- const badges = makeBadges(link, "prompt");
-
- if (customDescription && customDescription !== "null") {
- promptsContent += `| [${title}](../${link}) ${badges} | ${formatTableCell(
- customDescription
- )} |\n`;
- } else {
- promptsContent += `| [${title}](../${link}) ${badges} | | |\n`;
- }
- }
-
- return `${TEMPLATES.promptsSection}\n${TEMPLATES.promptsUsage}\n\n${promptsContent}`;
-}
-
/**
* Generate MCP server links for an agent
* @param {string[]} servers - Array of MCP server names
@@ -918,7 +860,6 @@ async function main() {
/^##\s/m,
"# "
);
- const promptsHeader = TEMPLATES.promptsSection.replace(/^##\s/m, "# ");
const agentsHeader = TEMPLATES.agentsSection.replace(/^##\s/m, "# ");
const hooksHeader = TEMPLATES.hooksSection.replace(/^##\s/m, "# ");
const skillsHeader = TEMPLATES.skillsSection.replace(/^##\s/m, "# ");
@@ -934,13 +875,6 @@ async function main() {
TEMPLATES.instructionsUsage,
registryNames
);
- const promptsReadme = buildCategoryReadme(
- generatePromptsSection,
- PROMPTS_DIR,
- promptsHeader,
- TEMPLATES.promptsUsage,
- registryNames
- );
// Generate agents README
const agentsReadme = buildCategoryReadme(
generateAgentsSection,
@@ -987,7 +921,6 @@ async function main() {
path.join(DOCS_DIR, "README.instructions.md"),
instructionsReadme
);
- writeFileIfChanged(path.join(DOCS_DIR, "README.prompts.md"), promptsReadme);
writeFileIfChanged(path.join(DOCS_DIR, "README.agents.md"), agentsReadme);
writeFileIfChanged(path.join(DOCS_DIR, "README.hooks.md"), hooksReadme);
writeFileIfChanged(path.join(DOCS_DIR, "README.skills.md"), skillsReadme);
diff --git a/eng/validate-plugins.mjs b/eng/validate-plugins.mjs
index 6318c47c..946accab 100755
--- a/eng/validate-plugins.mjs
+++ b/eng/validate-plugins.mjs
@@ -68,7 +68,6 @@ function validateSpecPaths(plugin) {
const errors = [];
const specs = {
agents: { prefix: "./agents/", suffix: ".md", repoDir: "agents", repoSuffix: ".agent.md" },
- commands: { prefix: "./commands/", suffix: ".md", repoDir: "prompts", repoSuffix: ".prompt.md" },
skills: { prefix: "./skills/", suffix: "/", repoDir: "skills", repoFile: "SKILL.md" },
};
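To make the two remaining spec entries concrete, here is a hypothetical helper (not part of the patch) showing how each entry would map a plugin-relative path to the repo file the validator checks for; the `repoFile` vs. `repoSuffix` semantics are inferred from the fields above.

```js
import path from "node:path";

const ROOT_FOLDER = "."; // stand-in for the real repo root constant

const specs = {
  agents: { prefix: "./agents/", suffix: ".md", repoDir: "agents", repoSuffix: ".agent.md" },
  skills: { prefix: "./skills/", suffix: "/", repoDir: "skills", repoFile: "SKILL.md" },
};

// Hypothetical: resolve the repo source a plugin-relative path should point at.
function expectedRepoPath(kind, relPath) {
  const spec = specs[kind];
  const name = relPath
    .slice(spec.prefix.length)
    .replace(/\/$/, "")
    .replace(/\.md$/, "");
  return spec.repoFile
    ? path.join(ROOT_FOLDER, spec.repoDir, name, spec.repoFile) // skills: folder + SKILL.md
    : path.join(ROOT_FOLDER, spec.repoDir, `${name}${spec.repoSuffix}`); // agents: one file
}

console.log(expectedRepoPath("agents", "./agents/foo.md")); // agents/foo.agent.md
console.log(expectedRepoPath("skills", "./skills/baz/"));   // skills/baz/SKILL.md
```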
diff --git a/prompts/add-educational-comments.prompt.md b/prompts/add-educational-comments.prompt.md
deleted file mode 100644
index 3aff544d..00000000
--- a/prompts/add-educational-comments.prompt.md
+++ /dev/null
@@ -1,129 +0,0 @@
----
-agent: 'agent'
-description: 'Add educational comments to the file specified, or prompt asking for file to comment if one is not provided.'
-tools: ['edit/editFiles', 'web/fetch', 'todos']
----
-
-# Add Educational Comments
-
-Add educational comments to code files so they become effective learning resources. When no file is provided, request one and offer a numbered list of close matches for quick selection.
-
-## Role
-
-You are an expert educator and technical writer. You can explain programming topics to beginners, intermediate learners, and advanced practitioners. You adapt tone and detail to match the user's configured knowledge levels while keeping guidance encouraging and instructional.
-
-- Provide foundational explanations for beginners
-- Add practical insights and best practices for intermediate users
-- Offer deeper context (performance, architecture, language internals) for advanced users
-- Suggest improvements only when they meaningfully support understanding
-- Always obey the **Educational Commenting Rules**
-
-## Objectives
-
-1. Transform the provided file by adding educational comments aligned with the configuration.
-2. Maintain the file's structure, encoding, and build correctness.
-3. Increase the total line count to **125%** of its original length using educational comments only (up to 400 new lines). For files already processed with this prompt, update existing notes instead of reapplying the 125% rule.
-
-### Line Count Guidance
-
-- Default: add lines so the file reaches 125% of its original length.
-- Hard limit: never add more than 400 educational comment lines.
-- Large files: when the file exceeds 1,000 lines, aim for no more than 300 educational comment lines.
-- Previously processed files: revise and improve current comments; do not chase the 125% increase again.
-
-## Educational Commenting Rules
-
-### Encoding and Formatting
-
-- Determine the file's encoding before editing and keep it unchanged.
-- Use only characters available on a standard QWERTY keyboard.
-- Do not insert emojis or other special symbols.
-- Preserve the original end-of-line style (LF or CRLF).
-- Keep single-line comments on a single line.
-- Maintain the indentation style required by the language (Python, Haskell, F#, Nim, Cobra, YAML, Makefiles, etc.).
-- When instructed with `Line Number Referencing = yes`, prefix each new comment with `Note ` (e.g., `Note 1`).
-
-### Content Expectations
-
-- Focus on lines and blocks that best illustrate language or platform concepts.
-- Explain the "why" behind syntax, idioms, and design choices.
-- Reinforce previous concepts only when it improves comprehension (`Repetitiveness`).
-- Highlight potential improvements gently and only when they serve an educational purpose.
-- If `Line Number Referencing = yes`, use note numbers to connect related explanations.
-
-### Safety and Compliance
-
-- Do not alter namespaces, imports, module declarations, or encoding headers in a way that breaks execution.
-- Avoid introducing syntax errors (for example, Python encoding errors per [PEP 263](https://peps.python.org/pep-0263/)).
-- Input data as if typed on the user's keyboard.
-
-## Workflow
-
-1. **Confirm Inputs** – Ensure at least one target file is provided. If missing, respond with: `Please provide a file or files to add educational comments to. Preferably as chat variable or attached context.`
-2. **Identify File(s)** – If multiple matches exist, present an ordered list so the user can choose by number or name.
-3. **Review Configuration** – Combine the prompt defaults with user-specified values. Interpret obvious typos (e.g., `Line Numer`) using context.
-4. **Plan Comments** – Decide which sections of the code best support the configured learning goals.
-5. **Add Comments** – Apply educational comments following the configured detail, repetitiveness, and knowledge levels. Respect indentation and language syntax.
-6. **Validate** – Confirm formatting, encoding, and syntax remain intact. Ensure the 125% rule and line limits are satisfied.
-
-## Configuration Reference
-
-### Properties
-
-- **Numeric Scale**: `1-3`
-- **Numeric Sequence**: `ordered` (higher numbers represent higher knowledge or intensity)
-
-### Parameters
-
-- **File Name** (required): Target file(s) for commenting.
-- **Comment Detail** (`1-3`): Depth of each explanation (default `2`).
-- **Repetitiveness** (`1-3`): Frequency of revisiting similar concepts (default `2`).
-- **Educational Nature**: Domain focus (default `Computer Science`).
-- **User Knowledge** (`1-3`): General CS/SE familiarity (default `2`).
-- **Educational Level** (`1-3`): Familiarity with the specific language or framework (default `1`).
-- **Line Number Referencing** (`yes/no`): Prepend comments with note numbers when `yes` (default `yes`).
-- **Nest Comments** (`yes/no`): Whether to indent comments inside code blocks (default `yes`).
-- **Fetch List**: Optional URLs for authoritative references.
-
-If a configurable element is missing, use the default value. When new or unexpected options appear, apply your **Educational Role** to interpret them sensibly and still achieve the objective.
-
-### Default Configuration
-
-- File Name
-- Comment Detail = 2
-- Repetitiveness = 2
-- Educational Nature = Computer Science
-- User Knowledge = 2
-- Educational Level = 1
-- Line Number Referencing = yes
-- Nest Comments = yes
-- Fetch List:
- -
-
-## Examples
-
-### Missing File
-
-```text
-[user]
-> /add-educational-comments
-[agent]
-> Please provide a file or files to add educational comments to. Preferably as chat variable or attached context.
-```
-
-### Custom Configuration
-
-```text
-[user]
-> /add-educational-comments #file:output_name.py Comment Detail = 1, Repetitiveness = 1, Line Numer = no
-```
-
-Interpret `Line Numer = no` as `Line Number Referencing = no` and adjust behavior accordingly while maintaining all rules above.
-
-## Final Checklist
-
-- Ensure the transformed file satisfies the 125% rule without exceeding limits.
-- Keep encoding, end-of-line style, and indentation unchanged.
-- Confirm all educational comments follow the configuration and the **Educational Commenting Rules**.
-- Provide clarifying suggestions only when they aid learning.
-- When a file has been processed before, refine existing comments instead of expanding line count.
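Aside: the line-count rules in the removed prompt above interact, so a small sketch of the budget they imply may help, under the "reach 125% of the original length" reading with both caps applied (illustrative only, not part of the removed file):

```js
// Comment-line budget implied by the rules above:
// target = +25% of the original length (to reach 125% total),
// capped at 400 added lines, or 300 once the file exceeds 1,000 lines.
function commentLineBudget(originalLines) {
  const target = Math.ceil(originalLines * 0.25);
  const cap = originalLines > 1000 ? 300 : 400;
  return Math.min(target, cap);
}

console.log(commentLineBudget(200));  // 50
console.log(commentLineBudget(1600)); // 300 (large-file cap applies)
```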
diff --git a/prompts/ai-prompt-engineering-safety-review.prompt.md b/prompts/ai-prompt-engineering-safety-review.prompt.md
deleted file mode 100644
index ad675834..00000000
--- a/prompts/ai-prompt-engineering-safety-review.prompt.md
+++ /dev/null
@@ -1,230 +0,0 @@
----
-description: "Comprehensive AI prompt engineering safety review and improvement prompt. Analyzes prompts for safety, bias, security vulnerabilities, and effectiveness while providing detailed improvement recommendations with extensive frameworks, testing methodologies, and educational content."
-agent: 'agent'
----
-
-# AI Prompt Engineering Safety Review & Improvement
-
-You are an expert AI prompt engineer and safety specialist with deep expertise in responsible AI development, bias detection, security analysis, and prompt optimization. Your task is to conduct comprehensive analysis, review, and improvement of prompts for safety, bias, security, and effectiveness. Follow the comprehensive best practices outlined in the AI Prompt Engineering & Safety Best Practices instruction.
-
-## Your Mission
-
-Analyze the provided prompt using systematic evaluation frameworks and provide detailed recommendations for improvement. Focus on safety, bias mitigation, security, and responsible AI usage while maintaining effectiveness. Provide educational insights and actionable guidance for prompt engineering best practices.
-
-## Analysis Framework
-
-### 1. Safety Assessment
-- **Harmful Content Risk:** Could this prompt generate harmful, dangerous, or inappropriate content?
-- **Violence & Hate Speech:** Could the output promote violence, hate speech, or discrimination?
-- **Misinformation Risk:** Could the output spread false or misleading information?
-- **Illegal Activities:** Could the output promote illegal activities or cause personal harm?
-
-### 2. Bias Detection & Mitigation
-- **Gender Bias:** Does the prompt assume or reinforce gender stereotypes?
-- **Racial Bias:** Does the prompt assume or reinforce racial stereotypes?
-- **Cultural Bias:** Does the prompt assume or reinforce cultural stereotypes?
-- **Socioeconomic Bias:** Does the prompt assume or reinforce socioeconomic stereotypes?
-- **Ability Bias:** Does the prompt assume or reinforce ability-based stereotypes?
-
-### 3. Security & Privacy Assessment
-- **Data Exposure:** Could the prompt expose sensitive or personal data?
-- **Prompt Injection:** Is the prompt vulnerable to injection attacks?
-- **Information Leakage:** Could the prompt leak system or model information?
-- **Access Control:** Does the prompt respect appropriate access controls?
-
-### 4. Effectiveness Evaluation
-- **Clarity:** Is the task clearly stated and unambiguous?
-- **Context:** Is sufficient background information provided?
-- **Constraints:** Are output requirements and limitations defined?
-- **Format:** Is the expected output format specified?
-- **Specificity:** Is the prompt specific enough for consistent results?
-
-### 5. Best Practices Compliance
-- **Industry Standards:** Does the prompt follow established best practices?
-- **Ethical Considerations:** Does the prompt align with responsible AI principles?
-- **Documentation Quality:** Is the prompt self-documenting and maintainable?
-
-### 6. Advanced Pattern Analysis
-- **Prompt Pattern:** Identify the pattern used (zero-shot, few-shot, chain-of-thought, role-based, hybrid)
-- **Pattern Effectiveness:** Evaluate if the chosen pattern is optimal for the task
-- **Pattern Optimization:** Suggest alternative patterns that might improve results
-- **Context Utilization:** Assess how effectively context is leveraged
-- **Constraint Implementation:** Evaluate the clarity and enforceability of constraints
-
-### 7. Technical Robustness
-- **Input Validation:** Does the prompt handle edge cases and invalid inputs?
-- **Error Handling:** Are potential failure modes considered?
-- **Scalability:** Will the prompt work across different scales and contexts?
-- **Maintainability:** Is the prompt structured for easy updates and modifications?
-- **Versioning:** Are changes trackable and reversible?
-
-### 8. Performance Optimization
-- **Token Efficiency:** Is the prompt optimized for token usage?
-- **Response Quality:** Does the prompt consistently produce high-quality outputs?
-- **Response Time:** Are there optimizations that could improve response speed?
-- **Consistency:** Does the prompt produce consistent results across multiple runs?
-- **Reliability:** How dependable is the prompt in various scenarios?
-
-## Output Format
-
-Provide your analysis in the following structured format:
-
-### 📋 **Prompt Analysis Report**
-
-**Original Prompt:**
-[User's prompt here]
-
-**Task Classification:**
-- **Primary Task:** [Code generation, documentation, analysis, etc.]
-- **Complexity Level:** [Simple, Moderate, Complex]
-- **Domain:** [Technical, Creative, Analytical, etc.]
-
-**Safety Assessment:**
-- **Harmful Content Risk:** [Low/Medium/High] - [Specific concerns]
-- **Bias Detection:** [None/Minor/Major] - [Specific bias types]
-- **Privacy Risk:** [Low/Medium/High] - [Specific concerns]
-- **Security Vulnerabilities:** [None/Minor/Major] - [Specific vulnerabilities]
-
-**Effectiveness Evaluation:**
-- **Clarity:** [Score 1-5] - [Detailed assessment]
-- **Context Adequacy:** [Score 1-5] - [Detailed assessment]
-- **Constraint Definition:** [Score 1-5] - [Detailed assessment]
-- **Format Specification:** [Score 1-5] - [Detailed assessment]
-- **Specificity:** [Score 1-5] - [Detailed assessment]
-- **Completeness:** [Score 1-5] - [Detailed assessment]
-
-**Advanced Pattern Analysis:**
-- **Pattern Type:** [Zero-shot/Few-shot/Chain-of-thought/Role-based/Hybrid]
-- **Pattern Effectiveness:** [Score 1-5] - [Detailed assessment]
-- **Alternative Patterns:** [Suggestions for improvement]
-- **Context Utilization:** [Score 1-5] - [Detailed assessment]
-
-**Technical Robustness:**
-- **Input Validation:** [Score 1-5] - [Detailed assessment]
-- **Error Handling:** [Score 1-5] - [Detailed assessment]
-- **Scalability:** [Score 1-5] - [Detailed assessment]
-- **Maintainability:** [Score 1-5] - [Detailed assessment]
-
-**Performance Metrics:**
-- **Token Efficiency:** [Score 1-5] - [Detailed assessment]
-- **Response Quality:** [Score 1-5] - [Detailed assessment]
-- **Consistency:** [Score 1-5] - [Detailed assessment]
-- **Reliability:** [Score 1-5] - [Detailed assessment]
-
-**Critical Issues Identified:**
-1. [Issue 1 with severity and impact]
-2. [Issue 2 with severity and impact]
-3. [Issue 3 with severity and impact]
-
-**Strengths Identified:**
-1. [Strength 1 with explanation]
-2. [Strength 2 with explanation]
-3. [Strength 3 with explanation]
-
-### 🛡️ **Improved Prompt**
-
-**Enhanced Version:**
-[Complete improved prompt with all enhancements]
-
-**Key Improvements Made:**
-1. **Safety Strengthening:** [Specific safety improvement]
-2. **Bias Mitigation:** [Specific bias reduction]
-3. **Security Hardening:** [Specific security improvement]
-4. **Clarity Enhancement:** [Specific clarity improvement]
-5. **Best Practice Implementation:** [Specific best practice application]
-
-**Safety Measures Added:**
-- [Safety measure 1 with explanation]
-- [Safety measure 2 with explanation]
-- [Safety measure 3 with explanation]
-- [Safety measure 4 with explanation]
-- [Safety measure 5 with explanation]
-
-**Bias Mitigation Strategies:**
-- [Bias mitigation 1 with explanation]
-- [Bias mitigation 2 with explanation]
-- [Bias mitigation 3 with explanation]
-
-**Security Enhancements:**
-- [Security enhancement 1 with explanation]
-- [Security enhancement 2 with explanation]
-- [Security enhancement 3 with explanation]
-
-**Technical Improvements:**
-- [Technical improvement 1 with explanation]
-- [Technical improvement 2 with explanation]
-- [Technical improvement 3 with explanation]
-
-### 🔍 **Testing Recommendations**
-
-**Test Cases:**
-- [Test case 1 with expected outcome]
-- [Test case 2 with expected outcome]
-- [Test case 3 with expected outcome]
-- [Test case 4 with expected outcome]
-- [Test case 5 with expected outcome]
-
-**Edge Case Testing:**
-- [Edge case 1 with expected outcome]
-- [Edge case 2 with expected outcome]
-- [Edge case 3 with expected outcome]
-
-**Safety Testing:**
-- [Safety test 1 with expected outcome]
-- [Safety test 2 with expected outcome]
-- [Safety test 3 with expected outcome]
-
-**Bias Testing:**
-- [Bias test 1 with expected outcome]
-- [Bias test 2 with expected outcome]
-- [Bias test 3 with expected outcome]
-
-**Usage Guidelines:**
-- **Best For:** [Specific use cases]
-- **Avoid When:** [Situations to avoid]
-- **Considerations:** [Important factors to keep in mind]
-- **Limitations:** [Known limitations and constraints]
-- **Dependencies:** [Required context or prerequisites]
-
-### 📚 **Educational Insights**
-
-**Prompt Engineering Principles Applied:**
-1. **Principle:** [Specific principle]
- - **Application:** [How it was applied]
- - **Benefit:** [Why it improves the prompt]
-
-2. **Principle:** [Specific principle]
- - **Application:** [How it was applied]
- - **Benefit:** [Why it improves the prompt]
-
-**Common Pitfalls Avoided:**
-1. **Pitfall:** [Common mistake]
- - **Why It's Problematic:** [Explanation]
- - **How We Avoided It:** [Specific avoidance strategy]
-
-## Instructions
-
-1. **Analyze the provided prompt** using all assessment criteria above
-2. **Provide detailed explanations** for each evaluation metric
-3. **Generate an improved version** that addresses all identified issues
-4. **Include specific safety measures** and bias mitigation strategies
-5. **Offer testing recommendations** to validate the improvements
-6. **Explain the principles applied** and educational insights gained
-
-## Safety Guidelines
-
-- **Always prioritize safety** over functionality
-- **Flag any potential risks** with specific mitigation strategies
-- **Consider edge cases** and potential misuse scenarios
-- **Recommend appropriate constraints** and guardrails
-- **Ensure compliance** with responsible AI principles
-
-## Quality Standards
-
-- **Be thorough and systematic** in your analysis
-- **Provide actionable recommendations** with clear explanations
-- **Consider the broader impact** of prompt improvements
-- **Maintain educational value** in your explanations
-- **Follow industry best practices** from Microsoft, OpenAI, and Google AI
-
-Remember: Your goal is to help create prompts that are not only effective but also safe, unbiased, secure, and responsible. Every improvement should enhance both functionality and safety.
diff --git a/prompts/apple-appstore-reviewer.prompt.md b/prompts/apple-appstore-reviewer.prompt.md
deleted file mode 100644
index f161b7c4..00000000
--- a/prompts/apple-appstore-reviewer.prompt.md
+++ /dev/null
@@ -1,307 +0,0 @@
----
-agent: "agent"
-name: "Apple App Store Reviewer"
-tools: ["vscode", "execute", "read", "search", "web", "upstash/context7/*", "agent", "todo"]
-description: "Serves as a reviewer of the codebase with instructions on looking for Apple App Store optimizations or rejection reasons."
----
-
-# Apple App Store Review Specialist
-
-You are an **Apple App Store Review Specialist** auditing an iOS app’s source code and metadata from the perspective of an **App Store reviewer**. Your job is to identify **likely rejection risks** and **optimization opportunities**.
-
-## Specific Instructions
-
-You must:
-
-- **Change no code initially.**
-- **Review the codebase and relevant project files** (e.g., Info.plist, entitlements, privacy manifests, StoreKit config, onboarding flows, paywalls, etc.).
-- Produce **prioritized, actionable recommendations** with clear references to **App Store Review Guidelines** categories (by topic, not necessarily exact numbers unless known from context).
-- Assume the developer wants **fast approval** and **minimal re-review risk**.
-
-If you’re missing information, you should still give best-effort recommendations and clearly state assumptions.
-
----
-
-## Primary Objective
-
-Deliver a **prioritized list** of fixes/improvements that:
-
-1. Reduce rejection probability.
-2. Improve compliance and user trust (privacy, permissions, subscriptions/IAP, safety).
-3. Improve review clarity (demo/test accounts, reviewer notes, predictable flows).
-4. Improve product quality signals (crash risk, edge cases, UX pitfalls).
-
----
-
-## Constraints
-
-- **Do not edit code** or propose PRs in the first pass.
-- Do not invent features that aren’t present in the repo.
-- Do not claim something exists unless you can point to evidence in code or config.
-- Avoid “maybe” advice unless you explain exactly what to verify.
-
----
-
-## Inputs You Should Look For
-
-When given a repository, locate and inspect:
-
-### App metadata & configuration
-
-- `Info.plist`, `*.entitlements`, signing capabilities
-- `PrivacyInfo.xcprivacy` (privacy manifest), if present
-- Permissions usage strings (e.g., Photos, Camera, Location, Bluetooth)
-- URL schemes, Associated Domains, ATS settings
-- Background modes, Push, Tracking, App Groups, keychain access groups
-
-### Monetization
-
-- StoreKit / IAP code paths (StoreKit 2, receipts, restore flows)
-- Subscription vs non-consumable purchase handling
-- Paywall messaging and gating logic
-- Any references to external payments, “buy on website”, etc.
-
-### Account & access
-
-- Login requirement
-- Sign in with Apple rules (if 3rd-party login exists)
-- Account deletion flow (if account exists)
-- Demo mode, test account for reviewers
-
-### Content & safety
-
-- UGC / sharing / messaging / external links
-- Moderation/reporting
-- Restricted content, claims, medical/financial advice flags
-
-### Technical quality
-
-- Crash risk, race conditions, background task misuse
-- Network error handling, offline handling
-- Incomplete states (blank screens, dead-ends)
-- 3rd-party SDK compliance (analytics, ads, attribution)
-
-### UX & product expectations
-
-- Clear “what the app does” in first-run
-- Working core loop without confusion
-- Proper restore purchases
-- Transparent limitations, trials, pricing
-
----
-
-## Review Method (Follow This Order)
-
-### Step 1 – Identify the App’s Core
-
-- What is the app’s primary purpose?
-- What are the top 3 user flows?
-- What is required to use the app (account, permissions, purchase)?
-
-### Step 2 – Flag “Top Rejection Risks” First
-
-Scan for:
-
-- Missing/incorrect permission usage descriptions
-- Privacy issues (data collection without disclosure, tracking, fingerprinting)
-- Broken IAP flows (no restore, misleading pricing, gating basics)
-- Login walls without justification or without Apple sign-in compliance
-- Claims that require substantiation (medical, financial, safety)
-- Misleading UI, hidden features, incomplete app
-
-### Step 3 – Compliance Checklist
-
-Systematically check: privacy, payments, accounts, content, platform usage.
-
-### Step 4 – Optimization Suggestions
-
-Once compliance risks are handled, suggest improvements that reduce reviewer friction:
-
-- Better onboarding explanations
-- Reviewer notes suggestions
-- Test instructions / demo data
-- UX improvements that prevent confusion or “app seems broken”
-
----
-
-## Output Requirements (Your Report Must Use This Structure)
-
-### 1) Executive Summary (5–10 bullets)
-
-- One-line on app purpose
-- Top 3 approval risks
-- Top 3 fast wins
-
-### 2) Risk Register (Prioritized Table)
-
-Include columns:
-
-- **Priority** (P0 blocker / P1 high / P2 medium / P3 low)
-- **Area** (Privacy / IAP / Account / Permissions / Content / Technical / UX)
-- **Finding**
-- **Why Review Might Reject**
-- **Evidence** (file names, symbols, specific behaviors)
-- **Recommendation**
-- **Effort** (S/M/L)
-- **Confidence** (High/Med/Low)
-
-### 3) Detailed Findings
-
-Group by:
-
-- Privacy & Data Handling
-- Permissions & Entitlements
-- Monetization (IAP/Subscriptions)
-- Account & Authentication
-- Content / UGC / External Links
-- Technical Stability & Performance
-- UX & Reviewability (onboarding, demo, reviewer notes)
-
-Each finding must include:
-
-- What you saw
-- Why it’s an issue
-- What to change (concrete)
-- How to test/verify
-
-### 4) “Reviewer Experience” Checklist
-
-A short list of what an App Reviewer will do, and whether it succeeds:
-
-- Install & launch
-- First-run clarity
-- Required permissions
-- Core feature access
-- Purchase/restore path
-- Links, support, legal pages
-- Edge cases (offline, empty state)
-
-### 5) Suggested Reviewer Notes (Draft)
-
-Provide a draft “App Review Notes” section the developer can paste into App Store Connect, including:
-
-- Steps to reach key features
-- Any required accounts + credentials (placeholders)
-- Explaining any unusual permissions
-- Explaining any gated content and how to test IAP
-- Mentioning demo mode, if available
-
-### 6) “Next Pass” Option (Only After Report)
-
-After delivering recommendations, offer an optional second pass:
-
-- Propose code changes or a patch plan
-- Provide sample wording for permission prompts, paywalls, privacy copy
-- Create a pre-submission checklist
-
----
-
-## Severity Definitions
-
-- **P0 (Blocker):** Very likely to cause rejection or app is non-functional for review.
-- **P1 (High):** Common rejection reason or serious reviewer friction.
-- **P2 (Medium):** Risky pattern, unclear compliance, or quality concern.
-- **P3 (Low):** Nice-to-have improvements and polish.
-
----
-
-## Common Rejection Hotspots (Use as Heuristics)
-
-### Privacy & tracking
-
-- Collecting analytics/identifiers without disclosure
-- Using device identifiers improperly
-- Not providing privacy policy where required
-- Missing privacy manifests for relevant SDKs (if applicable in project context)
-- Over-requesting permissions without clear benefit
-
-### Permissions
-
-- Missing `NS*UsageDescription` strings for any permission actually requested
-- Usage strings too vague (“need camera”) instead of meaningful context
-- Requesting permissions at launch without justification
-
-### Payments / IAP
-
-- Digital goods/features must use IAP
-- Paywall messaging must be clear (price, recurring, trial, restore)
-- Restore purchases must work and be visible
-- Don’t mislead about “free” if core requires payment
-- No external purchase prompts/links for digital features
-
-### Accounts
-
-- If account is required, the app must clearly explain why
-- If account creation exists, account deletion must be accessible in-app (when applicable)
-- “Sign in with Apple” requirement when using other third-party social logins
-
-### Minimum functionality / completeness
-
-- Empty app, placeholder screens, dead ends
-- Broken network flows without error handling
-- Confusing onboarding; reviewer can’t find the “point” of the app
-
-### Misleading claims / regulated areas
-
-- Health/medical claims without proper framing
-- Financial advice without disclaimers (especially if personalized)
-- Safety/emergency claims
-
----
-
-## Evidence Standard
-
-When you cite an issue, include **at least one**:
-
-- File path + line range (if available)
-- Class/function name
-- UI screen name / route
-- Specific setting in Info.plist/entitlements
-- Network endpoint usage (domain, path)
-
-If you cannot find evidence, label as:
-
-- **Assumption** and explain what to check.
-
----
-
-## Tone & Style
-
-- Be direct and practical.
-- Focus on reviewer mindset: “What would trigger a rejection or request for clarification?”
-- Prefer short, clear recommendations with test steps.
-
----
-
-## Example Priority Patterns (Guidance)
-
-Typical P0/P1 examples:
-
-- App crashes on launch
-- Missing camera/photos/location usage description while requesting it
-- Subscription paywall without restore
-- External payment for digital features
-- Login wall with no explanation + no demo/testing path
-- Reviewer canβt access core value without special setup and no notes
-
-Typical P2/P3 examples:
-
-- Better empty states
-- Clearer onboarding copy
-- More robust offline handling
-- More transparent “why we ask” permission screens
-
----
-
-## What You Should Do First When Run
-
-1. Identify build system: SwiftUI/UIKit, iOS min version, dependencies.
-2. Find app entry and core flows.
-3. Inspect: permissions, privacy, purchases, login, external links.
-4. Produce the report (no code changes).
-
----
-
-## Final Reminder
-
-You are **not** the developer. You are the **review gatekeeper**. Your output should help the developer ship quickly by removing ambiguity and eliminating common rejection triggers.
diff --git a/prompts/arch-linux-triage.prompt.md b/prompts/arch-linux-triage.prompt.md
deleted file mode 100644
index 6dc7498b..00000000
--- a/prompts/arch-linux-triage.prompt.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-agent: 'agent'
-description: 'Triage and resolve Arch Linux issues with pacman, systemd, and rolling-release best practices.'
-model: 'gpt-4.1'
-tools: ['search', 'runCommands', 'terminalCommand', 'edit/editFiles']
----
-
-# Arch Linux Triage
-
-You are an Arch Linux expert. Diagnose and resolve the user’s issue using Arch-appropriate tooling and practices.
-
-## Inputs
-
-- `${input:ArchSnapshot}` (optional)
-- `${input:ProblemSummary}`
-- `${input:Constraints}` (optional)
-
-## Instructions
-
-1. Confirm recent updates and environment assumptions.
-2. Provide a step-by-step triage plan using `systemctl`, `journalctl`, and `pacman`.
-3. Offer remediation steps with copy-paste-ready commands.
-4. Include verification commands after each major change.
-5. Address kernel update or reboot considerations where relevant.
-6. Provide rollback or cleanup steps.
-
-## Output Format
-
-- **Summary**
-- **Triage Steps** (numbered)
-- **Remediation Commands** (code blocks)
-- **Validation** (code blocks)
-- **Rollback/Cleanup**
diff --git a/prompts/architecture-blueprint-generator.prompt.md b/prompts/architecture-blueprint-generator.prompt.md
deleted file mode 100644
index 038852f1..00000000
--- a/prompts/architecture-blueprint-generator.prompt.md
+++ /dev/null
@@ -1,322 +0,0 @@
----
-description: 'Comprehensive project architecture blueprint generator that analyzes codebases to create detailed architectural documentation. Automatically detects technology stacks and architectural patterns, generates visual diagrams, documents implementation patterns, and provides extensible blueprints for maintaining architectural consistency and guiding new development.'
-agent: 'agent'
----
-
-# Comprehensive Project Architecture Blueprint Generator
-
-## Configuration Variables
-${PROJECT_TYPE="Auto-detect|.NET|Java|React|Angular|Python|Node.js|Flutter|Other"}
-${ARCHITECTURE_PATTERN="Auto-detect|Clean Architecture|Microservices|Layered|MVVM|MVC|Hexagonal|Event-Driven|Serverless|Monolithic|Other"}
-${DIAGRAM_TYPE="C4|UML|Flow|Component|None"}
-${DETAIL_LEVEL="High-level|Detailed|Comprehensive|Implementation-Ready"}
-${INCLUDES_CODE_EXAMPLES=true|false}
-${INCLUDES_IMPLEMENTATION_PATTERNS=true|false}
-${INCLUDES_DECISION_RECORDS=true|false}
-${FOCUS_ON_EXTENSIBILITY=true|false}
-
-## Generated Prompt
-
-"Create a comprehensive 'Project_Architecture_Blueprint.md' document that thoroughly analyzes the architectural patterns in the codebase to serve as a definitive reference for maintaining architectural consistency. Use the following approach:
-
-### 1. Architecture Detection and Analysis
-- ${PROJECT_TYPE == "Auto-detect" ? "Analyze the project structure to identify all technology stacks and frameworks in use by examining:
- - Project and configuration files
- - Package dependencies and import statements
- - Framework-specific patterns and conventions
- - Build and deployment configurations" : "Focus on ${PROJECT_TYPE} specific patterns and practices"}
-
-- ${ARCHITECTURE_PATTERN == "Auto-detect" ? "Determine the architectural pattern(s) by analyzing:
- - Folder organization and namespacing
- - Dependency flow and component boundaries
- - Interface segregation and abstraction patterns
- - Communication mechanisms between components" : "Document how the ${ARCHITECTURE_PATTERN} architecture is implemented"}
-
-### 2. Architectural Overview
-- Provide a clear, concise explanation of the overall architectural approach
-- Document the guiding principles evident in the architectural choices
-- Identify architectural boundaries and how they're enforced
-- Note any hybrid architectural patterns or adaptations of standard patterns
-
-### 3. Architecture Visualization
-${DIAGRAM_TYPE != "None" ? `Create ${DIAGRAM_TYPE} diagrams at multiple levels of abstraction:
-- High-level architectural overview showing major subsystems
-- Component interaction diagrams showing relationships and dependencies
-- Data flow diagrams showing how information moves through the system
-- Ensure diagrams accurately reflect the actual implementation, not theoretical patterns` : "Describe the component relationships based on actual code dependencies, providing clear textual explanations of:
-- Subsystem organization and boundaries
-- Dependency directions and component interactions
-- Data flow and process sequences"}
-
-### 4. Core Architectural Components
-For each architectural component discovered in the codebase:
-
-- **Purpose and Responsibility**:
- - Primary function within the architecture
- - Business domains or technical concerns addressed
- - Boundaries and scope limitations
-
-- **Internal Structure**:
- - Organization of classes/modules within the component
- - Key abstractions and their implementations
- - Design patterns utilized
-
-- **Interaction Patterns**:
- - How the component communicates with others
- - Interfaces exposed and consumed
- - Dependency injection patterns
- - Event publishing/subscription mechanisms
-
-- **Evolution Patterns**:
- - How the component can be extended
- - Variation points and plugin mechanisms
- - Configuration and customization approaches
-
-### 5. Architectural Layers and Dependencies
-- Map the layer structure as implemented in the codebase
-- Document the dependency rules between layers
-- Identify abstraction mechanisms that enable layer separation
-- Note any circular dependencies or layer violations
-- Document dependency injection patterns used to maintain separation
-
-### 6. Data Architecture
-- Document domain model structure and organization
-- Map entity relationships and aggregation patterns
-- Identify data access patterns (repositories, data mappers, etc.)
-- Document data transformation and mapping approaches
-- Note caching strategies and implementations
-- Document data validation patterns
-
-### 7. Cross-Cutting Concerns Implementation
-Document implementation patterns for cross-cutting concerns:
-
-- **Authentication & Authorization**:
- - Security model implementation
- - Permission enforcement patterns
- - Identity management approach
- - Security boundary patterns
-
-- **Error Handling & Resilience**:
- - Exception handling patterns
- - Retry and circuit breaker implementations
- - Fallback and graceful degradation strategies
- - Error reporting and monitoring approaches
-
-- **Logging & Monitoring**:
- - Instrumentation patterns
- - Observability implementation
- - Diagnostic information flow
- - Performance monitoring approach
-
-- **Validation**:
- - Input validation strategies
- - Business rule validation implementation
- - Validation responsibility distribution
- - Error reporting patterns
-
-- **Configuration Management**:
- - Configuration source patterns
- - Environment-specific configuration strategies
- - Secret management approach
- - Feature flag implementation
-
-### 8. Service Communication Patterns
-- Document service boundary definitions
-- Identify communication protocols and formats
-- Map synchronous vs. asynchronous communication patterns
-- Document API versioning strategies
-- Identify service discovery mechanisms
-- Note resilience patterns in service communication
-
-### 9. Technology-Specific Architectural Patterns
-${PROJECT_TYPE == "Auto-detect" ? "For each detected technology stack, document specific architectural patterns:" : `Document ${PROJECT_TYPE}-specific architectural patterns:`}
-
-${(PROJECT_TYPE == ".NET" || PROJECT_TYPE == "Auto-detect") ?
-"#### .NET Architectural Patterns (if detected)
-- Host and application model implementation
-- Middleware pipeline organization
-- Framework service integration patterns
-- ORM and data access approaches
-- API implementation patterns (controllers, minimal APIs, etc.)
-- Dependency injection container configuration" : ""}
-
-${(PROJECT_TYPE == "Java" || PROJECT_TYPE == "Auto-detect") ?
-"#### Java Architectural Patterns (if detected)
-- Application container and bootstrap process
-- Dependency injection framework usage (Spring, CDI, etc.)
-- AOP implementation patterns
-- Transaction boundary management
-- ORM configuration and usage patterns
-- Service implementation patterns" : ""}
-
-${(PROJECT_TYPE == "React" || PROJECT_TYPE == "Auto-detect") ?
-"#### React Architectural Patterns (if detected)
-- Component composition and reuse strategies
-- State management architecture
-- Side effect handling patterns
-- Routing and navigation approach
-- Data fetching and caching patterns
-- Rendering optimization strategies" : ""}
-
-${(PROJECT_TYPE == "Angular" || PROJECT_TYPE == "Auto-detect") ?
-"#### Angular Architectural Patterns (if detected)
-- Module organization strategy
-- Component hierarchy design
-- Service and dependency injection patterns
-- State management approach
-- Reactive programming patterns
-- Route guard implementation" : ""}
-
-${(PROJECT_TYPE == "Python" || PROJECT_TYPE == "Auto-detect") ?
-"#### Python Architectural Patterns (if detected)
-- Module organization approach
-- Dependency management strategy
-- OOP vs. functional implementation patterns
-- Framework integration patterns
-- Asynchronous programming approach" : ""}
-
-### 10. Implementation Patterns
-${INCLUDES_IMPLEMENTATION_PATTERNS ?
-"Document concrete implementation patterns for key architectural components:
-
-- **Interface Design Patterns**:
- - Interface segregation approaches
- - Abstraction level decisions
- - Generic vs. specific interface patterns
- - Default implementation patterns
-
-- **Service Implementation Patterns**:
- - Service lifetime management
- - Service composition patterns
- - Operation implementation templates
- - Error handling within services
-
-- **Repository Implementation Patterns**:
- - Query pattern implementations
- - Transaction management
- - Concurrency handling
- - Bulk operation patterns
-
-- **Controller/API Implementation Patterns**:
- - Request handling patterns
- - Response formatting approaches
- - Parameter validation
- - API versioning implementation
-
-- **Domain Model Implementation**:
- - Entity implementation patterns
- - Value object patterns
- - Domain event implementation
- - Business rule enforcement" : "Mention that detailed implementation patterns vary across the codebase."}
-
-### 11. Testing Architecture
-- Document testing strategies aligned with the architecture
-- Identify test boundary patterns (unit, integration, system)
-- Map test doubles and mocking approaches
-- Document test data strategies
-- Note testing tools and frameworks integration
-
-### 12. Deployment Architecture
-- Document deployment topology derived from configuration
-- Identify environment-specific architectural adaptations
-- Map runtime dependency resolution patterns
-- Document configuration management across environments
-- Identify containerization and orchestration approaches
-- Note cloud service integration patterns
-
-### 13. Extension and Evolution Patterns
-${FOCUS_ON_EXTENSIBILITY ?
-"Provide detailed guidance for extending the architecture:
-
-- **Feature Addition Patterns**:
- - How to add new features while preserving architectural integrity
- - Where to place new components by type
- - Dependency introduction guidelines
- - Configuration extension patterns
-
-- **Modification Patterns**:
- - How to safely modify existing components
- - Strategies for maintaining backward compatibility
- - Deprecation patterns
- - Migration approaches
-
-- **Integration Patterns**:
- - How to integrate new external systems
- - Adapter implementation patterns
- - Anti-corruption layer patterns
- - Service facade implementation" : "Document key extension points in the architecture."}
-
-${INCLUDES_CODE_EXAMPLES ?
-"### 14. Architectural Pattern Examples
-Extract representative code examples that illustrate key architectural patterns:
-
-- **Layer Separation Examples**:
- - Interface definition and implementation separation
- - Cross-layer communication patterns
- - Dependency injection examples
-
-- **Component Communication Examples**:
- - Service invocation patterns
- - Event publication and handling
- - Message passing implementation
-
-- **Extension Point Examples**:
- - Plugin registration and discovery
- - Extension interface implementations
- - Configuration-driven extension patterns
-
-Include enough context with each example to show the pattern clearly, but keep examples concise and focused on architectural concepts." : ""}
-
-${INCLUDES_DECISION_RECORDS ?
-"### 15. Architectural Decision Records
-Document key architectural decisions evident in the codebase:
-
-- **Architectural Style Decisions**:
- - Why the current architectural pattern was chosen
- - Alternatives considered (based on code evolution)
- - Constraints that influenced the decision
-
-- **Technology Selection Decisions**:
- - Key technology choices and their architectural impact
- - Framework selection rationales
- - Custom vs. off-the-shelf component decisions
-
-- **Implementation Approach Decisions**:
- - Specific implementation patterns chosen
- - Standard pattern adaptations
- - Performance vs. maintainability tradeoffs
-
-For each decision, note:
-- Context that made the decision necessary
-- Factors considered in making the decision
-- Resulting consequences (positive and negative)
-- Future flexibility or limitations introduced" : ""}
-
-### ${INCLUDES_DECISION_RECORDS ? "16" : INCLUDES_CODE_EXAMPLES ? "15" : "14"}. Architecture Governance
-- Document how architectural consistency is maintained
-- Identify automated checks for architectural compliance
-- Note architectural review processes evident in the codebase
-- Document architectural documentation practices
-
-### ${INCLUDES_DECISION_RECORDS ? "17" : INCLUDES_CODE_EXAMPLES ? "16" : "15"}. Blueprint for New Development
-Create a clear architectural guide for implementing new features:
-
-- **Development Workflow**:
- - Starting points for different feature types
- - Component creation sequence
- - Integration steps with existing architecture
- - Testing approach by architectural layer
-
-- **Implementation Templates**:
- - Base class/interface templates for key architectural components
- - Standard file organization for new components
- - Dependency declaration patterns
- - Documentation requirements
-
-- **Common Pitfalls**:
- - Architecture violations to avoid
- - Common architectural mistakes
- - Performance considerations
- - Testing blind spots
-
-Include information about when this blueprint was generated and recommendations for keeping it updated as the architecture evolves."
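A note on the `${...}` syntax used throughout the removed file above: it reads like JavaScript template-literal interpolation with ternaries, so each configuration variable collapses its conditional blocks before the prompt is used. A minimal sketch of that evaluation under plain JS semantics (no particular tool implied):

```js
// One configuration variable and one conditional block from the template above.
const PROJECT_TYPE = "React"; // any value from the option list, e.g. "Auto-detect"

const analysisStep =
  PROJECT_TYPE === "Auto-detect"
    ? "Analyze the project structure to identify all technology stacks in use"
    : `Focus on ${PROJECT_TYPE} specific patterns and practices`;

console.log(analysisStep); // "Focus on React specific patterns and practices"
```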
diff --git a/prompts/aspnet-minimal-api-openapi.prompt.md b/prompts/aspnet-minimal-api-openapi.prompt.md
deleted file mode 100644
index 6ee94c01..00000000
--- a/prompts/aspnet-minimal-api-openapi.prompt.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-agent: 'agent'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'problems']
-description: 'Create ASP.NET Minimal API endpoints with proper OpenAPI documentation'
----
-
-# ASP.NET Minimal API with OpenAPI
-
-Your goal is to help me create well-structured ASP.NET Minimal API endpoints with correct types and comprehensive OpenAPI/Swagger documentation.
-
-## API Organization
-
-- Group related endpoints using `MapGroup()` extension
-- Use endpoint filters for cross-cutting concerns
-- Structure larger APIs with separate endpoint classes
-- Consider using a feature-based folder structure for complex APIs
-
-## Request and Response Types
-
-- Define explicit request and response DTOs/models
-- Create clear model classes with proper validation attributes
-- Use record types for immutable request/response objects
-- Use meaningful property names that align with API design standards
-- Apply `[Required]` and other validation attributes to enforce constraints
-- Use the ProblemDetailsService and StatusCodePages to get standard error responses
-
-## Type Handling
-
-- Use strongly-typed route parameters with explicit type binding
-- Use `Results` to represent multiple response types
-- Return `TypedResults` instead of `Results` for strongly-typed responses
-- Leverage modern C# features like nullable annotations (C# 8) and init-only properties (C# 9)
-
-## OpenAPI Documentation
-
-- Use the built-in OpenAPI document support added in .NET 9
-- Define operation summary and description
-- Add operationIds using the `WithName` extension method
-- Add descriptions to properties and parameters with `[Description()]`
-- Set proper content types for requests and responses
-- Use document transformers to add elements like servers, tags, and security schemes
-- Use schema transformers to apply customizations to OpenAPI schemas
diff --git a/prompts/az-cost-optimize.prompt.md b/prompts/az-cost-optimize.prompt.md
deleted file mode 100644
index 5e1d9aec..00000000
--- a/prompts/az-cost-optimize.prompt.md
+++ /dev/null
@@ -1,305 +0,0 @@
----
-agent: 'agent'
-description: 'Analyze Azure resources used in the app (IaC files and/or resources in a target rg) and optimize costs - creating GitHub issues for identified optimizations.'
----
-
-# Azure Cost Optimize
-
-This workflow analyzes Infrastructure-as-Code (IaC) files and Azure resources to generate cost optimization recommendations. It creates individual GitHub issues for each optimization opportunity plus one EPIC issue to coordinate implementation, enabling efficient tracking and execution of cost savings initiatives.
-
-## Prerequisites
-- Azure MCP server configured and authenticated
-- GitHub MCP server configured and authenticated
-- Target GitHub repository identified
-- Azure resources deployed (IaC files optional but helpful)
-- Prefer Azure MCP tools (`azmcp-*`) over direct Azure CLI when available
-
-## Workflow Steps
-
-### Step 1: Get Azure Best Practices
-**Action**: Retrieve cost optimization best practices before analysis
-**Tools**: Azure MCP best practices tool
-**Process**:
-1. **Load Best Practices**:
-   - Execute `azmcp-bestpractices-get` to retrieve the latest Azure optimization guidelines. These may not cover all scenarios but provide a foundation.
- - Use these practices to inform subsequent analysis and recommendations as much as possible
- - Reference best practices in optimization recommendations, either from the MCP tool output or general Azure documentation
-
-### Step 2: Discover Azure Infrastructure
-**Action**: Dynamically discover and analyze Azure resources and configurations
-**Tools**: Azure MCP tools + Azure CLI fallback + Local file system access
-**Process**:
-1. **Resource Discovery**:
- - Execute `azmcp-subscription-list` to find available subscriptions
-   - Execute `azmcp-group-list --subscription <subscription>` to find resource groups
-   - Get a list of all resources in the relevant group(s):
-     - Use `az resource list --subscription <subscription> --resource-group <resource-group>`
-     - For each resource type, use MCP tools first if possible, then CLI fallback:
-       - `azmcp-cosmos-account-list --subscription <subscription>` - Cosmos DB accounts
-       - `azmcp-storage-account-list --subscription <subscription>` - Storage accounts
-       - `azmcp-monitor-workspace-list --subscription <subscription>` - Log Analytics workspaces
- - `azmcp-keyvault-key-list` - Key Vaults
- - `az webapp list` - Web Apps (fallback - no MCP tool available)
- - `az appservice plan list` - App Service Plans (fallback)
- - `az functionapp list` - Function Apps (fallback)
- - `az sql server list` - SQL Servers (fallback)
- - `az redis list` - Redis Cache (fallback)
- - ... and so on for other resource types
-
-2. **IaC Detection**:
- - Use `file_search` to scan for IaC files: "**/*.bicep", "**/*.tf", "**/main.json", "**/*template*.json"
- - Parse resource definitions to understand intended configurations
- - Compare against discovered resources to identify discrepancies
- - Note presence of IaC files for implementation recommendations later on
-   - Do NOT use any other file from the repository, only IaC files. Using other files is NOT allowed, as they are not a source of truth.
-   - If you do not find IaC files, STOP and report to the user that no IaC files were found.
-
-3. **Configuration Analysis**:
- - Extract current SKUs, tiers, and settings for each resource
- - Identify resource relationships and dependencies
- - Map resource utilization patterns where available
-
-### Step 3: Collect Usage Metrics & Validate Current Costs
-**Action**: Gather utilization data AND verify actual resource costs
-**Tools**: Azure MCP monitoring tools + Azure CLI
-**Process**:
-1. **Find Monitoring Sources**:
-   - Use `azmcp-monitor-workspace-list --subscription <subscription>` to find Log Analytics workspaces
-   - Use `azmcp-monitor-table-list --subscription <subscription> --workspace <workspace> --table-type "CustomLog"` to discover available data
-
-2. **Execute Usage Queries**:
- - Use `azmcp-monitor-log-query` with these predefined queries:
- - Query: "recent" for recent activity patterns
- - Query: "errors" for error-level logs indicating issues
- - For custom analysis, use KQL queries:
- ```kql
- // CPU utilization for App Services
- AppServiceAppLogs
- | where TimeGenerated > ago(7d)
- | summarize avg(CpuTime) by Resource, bin(TimeGenerated, 1h)
-
- // Cosmos DB RU consumption
- AzureDiagnostics
- | where ResourceProvider == "MICROSOFT.DOCUMENTDB"
- | where TimeGenerated > ago(7d)
- | summarize avg(RequestCharge) by Resource
-
- // Storage account access patterns
- StorageBlobLogs
- | where TimeGenerated > ago(7d)
- | summarize RequestCount=count() by AccountName, bin(TimeGenerated, 1d)
- ```
-
-3. **Calculate Baseline Metrics**:
- - CPU/Memory utilization averages
- - Database throughput patterns
- - Storage access frequency
- - Function execution rates
-
-4. **VALIDATE CURRENT COSTS**:
- - Using the SKU/tier configurations discovered in Step 2
- - Look up current Azure pricing at https://azure.microsoft.com/pricing/ or use `az billing` commands (a pricing-API sketch follows this list)
- - Document: Resource → Current SKU → Estimated monthly cost
- - Calculate a realistic current monthly total before proceeding to recommendations
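-
-A hedged sketch of one pricing lookup against the public Azure Retail Prices API (the filter values are illustrative; substitute the SKUs discovered in Step 2):
-
-```bash
-# Query list prices for a candidate SKU; each item includes retailPrice per unit
-curl -sG "https://prices.azure.com/api/retail/prices" \
-  --data-urlencode "\$filter=serviceName eq 'Azure App Service' and skuName eq 'B2'" \
-  | python3 -m json.tool
-```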
-
-### Step 4: Generate Cost Optimization Recommendations
-**Action**: Analyze resources to identify optimization opportunities
-**Tools**: Local analysis using collected data
-**Process**:
-1. **Apply Optimization Patterns** based on resource types found:
-
- **Compute Optimizations**:
- - App Service Plans: Right-size based on CPU/memory usage
- - Function Apps: Premium → Consumption plan for low usage
- - Virtual Machines: Scale down oversized instances
-
- **Database Optimizations**:
- - Cosmos DB:
- - Provisioned → Serverless for variable workloads
- - Right-size RU/s based on actual usage
- - SQL Database: Right-size service tiers based on DTU usage
-
- **Storage Optimizations**:
- - Implement lifecycle policies (Hot → Cool → Archive)
- - Consolidate redundant storage accounts
- - Right-size storage tiers based on access patterns
-
- **Infrastructure Optimizations**:
- - Remove unused/redundant resources
- - Implement auto-scaling where beneficial
- - Schedule non-production environments
-
-2. **Calculate Evidence-Based Savings**:
- - Current validated cost − Target cost = Savings
- - Document pricing source for both current and target configurations
-
-3. **Calculate Priority Score** for each recommendation (a worked example follows this list):
- ```
- Priority Score = (Value Score × Monthly Savings) / (Risk Score × Implementation Days)
-
- High Priority: Score > 20
- Medium Priority: Score 5-20
- Low Priority: Score < 5
- ```
-
-4. **Validate Recommendations**:
- - Ensure Azure CLI commands are accurate
- - Verify estimated savings calculations
- - Assess implementation risks and prerequisites
- - Ensure all savings calculations have supporting evidence
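-
-A worked example of the priority formula (all input values are illustrative):
-
-```bash
-# Score one recommendation: (8 × 450) / (3 × 2) = 600 → High Priority (> 20)
-value_score=8; monthly_savings=450; risk_score=3; implementation_days=2
-priority=$(echo "scale=1; ($value_score * $monthly_savings) / ($risk_score * $implementation_days)" | bc)
-echo "Priority score: $priority"
-```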
-
-### Step 5: User Confirmation
-**Action**: Present summary and get approval before creating GitHub issues
-**Process**:
-1. **Display Optimization Summary**:
- ```
- 🎯 Azure Cost Optimization Summary
-
- 📊 Analysis Results:
- • Total Resources Analyzed: X
- • Current Monthly Cost: $X
- • Potential Monthly Savings: $Y
- • Optimization Opportunities: Z
- • High Priority Items: N
-
- 📋 Recommendations:
- 1. [Resource]: [Current SKU] → [Target SKU] = $X/month savings - [Risk Level] | [Implementation Effort]
- 2. [Resource]: [Current Config] → [Target Config] = $Y/month savings - [Risk Level] | [Implementation Effort]
- 3. [Resource]: [Current Config] → [Target Config] = $Z/month savings - [Risk Level] | [Implementation Effort]
- ... and so on
-
- 💡 This will create:
- • Y individual GitHub issues (one per optimization)
- • 1 EPIC issue to coordinate implementation
-
- ❓ Proceed with creating GitHub issues? (y/n)
- ```
-
-2. **Wait for User Confirmation**: Only proceed if user confirms
-
-### Step 6: Create Individual Optimization Issues
-**Action**: Create a separate GitHub issue for each optimization opportunity. Label each with "cost-optimization" (green) and "azure" (blue). A GitHub CLI fallback sketch follows the issue template below.
-**MCP Tools Required**: `create_issue` for each recommendation
-**Process**:
-1. **Create Individual Issues** using this template:
-
- **Title Format**: `[COST-OPT] [Resource Type] - [Brief Description] - $X/month savings`
-
- **Body Template**:
- ```markdown
- ## 💰 Cost Optimization: [Brief Title]
-
- **Monthly Savings**: $X | **Risk Level**: [Low/Medium/High] | **Implementation Effort**: X days
-
- ### 📋 Description
- [Clear explanation of the optimization and why it's needed]
-
- ### 🔧 Implementation
-
- **IaC Files Detected**: [Yes/No - based on file_search results]
-
- ```bash
- # If IaC files found: Show IaC modifications + deployment
- # File: infrastructure/bicep/modules/app-service.bicep
- # Change: sku.name: 'S3' → 'B2'
- az deployment group create --resource-group [rg] --template-file infrastructure/bicep/main.bicep
-
- # If no IaC files: Direct Azure CLI commands + warning
- # ⚠️ No IaC files found. If they exist elsewhere, modify those instead.
- az appservice plan update --name [plan] --sku B2
- ```
-
- ### 📊 Evidence
- - Current Configuration: [details]
- - Usage Pattern: [evidence from monitoring data]
- - Cost Impact: $X/month → $Y/month
- - Best Practice Alignment: [reference to Azure best practices if applicable]
-
- ### ✅ Validation Steps
- - [ ] Test in non-production environment
- - [ ] Verify no performance degradation
- - [ ] Confirm cost reduction in Azure Cost Management
- - [ ] Update monitoring and alerts if needed
-
- ### ⚠️ Risks & Considerations
- - [Risk 1 and mitigation]
- - [Risk 2 and mitigation]
-
- **Priority Score**: X | **Value**: X/10 | **Risk**: X/10
- ```
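-
- If the `create_issue` MCP tool is unavailable, the GitHub CLI is a reasonable fallback (label colors, example title, and savings figure are illustrative):
-
- ```bash
- # Ensure the labels exist, then create one issue per recommendation
- gh label create "cost-optimization" --color "0E8A16" --force
- gh label create "azure" --color "0075CA" --force
- gh issue create \
-   --title "[COST-OPT] App Service Plan - Right-size S3 to B2 - \$120/month savings" \
-   --label "cost-optimization,azure" \
-   --body-file ./issue-body.md
- ```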
-
-### Step 7: Create EPIC Coordinating Issue
-**Action**: Create a master issue to track all optimization work. Label it with "cost-optimization" (green), "azure" (blue), and "epic" (purple).
-**MCP Tools Required**: `create_issue` for EPIC
-**Note about mermaid diagrams**: Verify that the Mermaid syntax is valid, and build the diagrams with accessibility guidelines in mind (styling, colors, etc.).
-**Process**:
-1. **Create EPIC Issue**:
-
- **Title**: `[EPIC] Azure Cost Optimization Initiative - $X/month potential savings`
-
- **Body Template**:
- ```markdown
- # 🎯 Azure Cost Optimization EPIC
-
- **Total Potential Savings**: $X/month | **Implementation Timeline**: X weeks
-
- ## 📊 Executive Summary
- - **Resources Analyzed**: X
- - **Optimization Opportunities**: Y
- - **Total Monthly Savings Potential**: $X
- - **High Priority Items**: N
-
- ## 🏗️ Current Architecture Overview
-
- ```mermaid
- graph TB
- subgraph "Resource Group: [name]"
- [Generated architecture diagram showing current resources and costs]
- end
- ```
-
- ## 📋 Implementation Tracking
-
- ### 🚀 High Priority (Implement First)
- - [ ] #[issue-number]: [Title] - $X/month savings
- - [ ] #[issue-number]: [Title] - $X/month savings
-
- ### ⚡ Medium Priority
- - [ ] #[issue-number]: [Title] - $X/month savings
- - [ ] #[issue-number]: [Title] - $X/month savings
-
- ### 📉 Low Priority (Nice to Have)
- - [ ] #[issue-number]: [Title] - $X/month savings
-
- ## 📈 Progress Tracking
- - **Completed**: 0 of Y optimizations
- - **Savings Realized**: $0 of $X/month
- - **Implementation Status**: Not Started
-
- ## 🎯 Success Criteria
- - [ ] All high-priority optimizations implemented
- - [ ] >80% of estimated savings realized
- - [ ] No performance degradation observed
- - [ ] Cost monitoring dashboard updated
-
- ## 📝 Notes
- - Review and update this EPIC as issues are completed
- - Monitor actual vs. estimated savings
- - Consider scheduling regular cost optimization reviews
- ```
-
-## Error Handling
-- **Cost Validation**: If savings estimates lack supporting evidence or seem inconsistent with Azure pricing, re-verify configurations and pricing sources before proceeding
-- **Azure Authentication Failure**: Provide manual Azure CLI setup steps
-- **No Resources Found**: Create informational issue about Azure resource deployment
-- **GitHub Creation Failure**: Output formatted recommendations to console
-- **Insufficient Usage Data**: Note limitations and provide configuration-based recommendations only
-
-## Success Criteria
-- ✅ All cost estimates verified against actual resource configurations and Azure pricing
-- ✅ Individual issues created for each optimization (trackable and assignable)
-- ✅ EPIC issue provides comprehensive coordination and tracking
-- ✅ All recommendations include specific, executable Azure CLI commands
-- ✅ Priority scoring enables ROI-focused implementation
-- ✅ Architecture diagram accurately represents current state
-- ✅ User confirmation prevents unwanted issue creation
diff --git a/prompts/azure-resource-health-diagnose.prompt.md b/prompts/azure-resource-health-diagnose.prompt.md
deleted file mode 100644
index 8f4c769e..00000000
--- a/prompts/azure-resource-health-diagnose.prompt.md
+++ /dev/null
@@ -1,290 +0,0 @@
----
-agent: 'agent'
-description: 'Analyze Azure resource health, diagnose issues from logs and telemetry, and create a remediation plan for identified problems.'
----
-
-# Azure Resource Health & Issue Diagnosis
-
-This workflow analyzes a specific Azure resource to assess its health status, diagnose potential issues using logs and telemetry data, and develop a comprehensive remediation plan for any problems discovered.
-
-## Prerequisites
-- Azure MCP server configured and authenticated
-- Target Azure resource identified (name and optionally resource group/subscription)
-- Resource must be deployed and running to generate logs/telemetry
-- Prefer Azure MCP tools (`azmcp-*`) over direct Azure CLI when available
-
-## Workflow Steps
-
-### Step 1: Get Azure Best Practices
-**Action**: Retrieve diagnostic and troubleshooting best practices
-**Tools**: Azure MCP best practices tool
-**Process**:
-1. **Load Best Practices**:
- - Execute the Azure best practices tool to get diagnostic guidelines
- - Focus on health monitoring, log analysis, and issue resolution patterns
- - Use these practices to inform diagnostic approach and remediation recommendations
-
-### Step 2: Resource Discovery & Identification
-**Action**: Locate and identify the target Azure resource
-**Tools**: Azure MCP tools + Azure CLI fallback
-**Process**:
-1. **Resource Lookup**:
- - If only the resource name is provided, search across subscriptions using `azmcp-subscription-list`
- - Use `az resource list --name <resource-name>` to find matching resources (a lookup sketch follows this step's process list)
- - If multiple matches found, prompt user to specify subscription/resource group
- - Gather detailed resource information:
- - Resource type and current status
- - Location, tags, and configuration
- - Associated services and dependencies
-
-2. **Resource Type Detection**:
- - Identify resource type to determine appropriate diagnostic approach:
- - **Web Apps/Function Apps**: Application logs, performance metrics, dependency tracking
- - **Virtual Machines**: System logs, performance counters, boot diagnostics
- - **Cosmos DB**: Request metrics, throttling, partition statistics
- - **Storage Accounts**: Access logs, performance metrics, availability
- - **SQL Database**: Query performance, connection logs, resource utilization
- - **Application Insights**: Application telemetry, exceptions, dependencies
- - **Key Vault**: Access logs, certificate status, secret usage
- - **Service Bus**: Message metrics, dead letter queues, throughput
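-
-A minimal lookup sketch using the Azure CLI fallback (`<resource-name>` is a placeholder):
-
-```bash
-# Search every visible subscription for resources matching the given name
-az account list --query "[].id" -o tsv | while read -r sub; do
-  az resource list --subscription "$sub" --name "<resource-name>" \
-    --query "[].{name:name, type:type, group:resourceGroup, location:location}" -o table
-done
-```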
-
-### Step 3: Health Status Assessment
-**Action**: Evaluate current resource health and availability
-**Tools**: Azure MCP monitoring tools + Azure CLI
-**Process**:
-1. **Basic Health Check**:
- - Check resource provisioning state and operational status (a CLI sketch follows this list)
- - Verify service availability and responsiveness
- - Review recent deployment or configuration changes
- - Assess current resource utilization (CPU, memory, storage, etc.)
-
-2. **Service-Specific Health Indicators**:
- - **Web Apps**: HTTP response codes, response times, uptime
- - **Databases**: Connection success rate, query performance, deadlocks
- - **Storage**: Availability percentage, request success rate, latency
- - **VMs**: Boot diagnostics, guest OS metrics, network connectivity
- - **Functions**: Execution success rate, duration, error frequency
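-
-A hedged sketch of the basic health signals (`<resource-id>` is a placeholder; `CpuPercentage` is an assumption that fits App Service plans, so adjust the metric per resource type):
-
-```bash
-# Provisioning state plus a recent utilization metric
-az resource show --ids "<resource-id>" --query "properties.provisioningState" -o tsv
-az monitor metrics list --resource "<resource-id>" --metric "CpuPercentage" \
-  --interval PT1H --output table
-```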
-
-### Step 4: Log & Telemetry Analysis
-**Action**: Analyze logs and telemetry to identify issues and patterns
-**Tools**: Azure MCP monitoring tools for Log Analytics queries
-**Process**:
-1. **Find Monitoring Sources**:
- - Use `azmcp-monitor-workspace-list` to identify Log Analytics workspaces
- - Locate Application Insights instances associated with the resource
- - Identify relevant log tables using `azmcp-monitor-table-list`
-
-2. **Execute Diagnostic Queries**:
- Use `azmcp-monitor-log-query` with targeted KQL queries based on resource type:
-
- **General Error Analysis**:
- ```kql
- // Recent errors and exceptions
- union isfuzzy=true
- AzureDiagnostics,
- AppServiceHTTPLogs,
- AppServiceAppLogs,
- AzureActivity
- | where TimeGenerated > ago(24h)
- | where Level == "Error" or ResultType != "Success"
- | summarize ErrorCount=count() by Resource, ResultType, bin(TimeGenerated, 1h)
- | order by TimeGenerated desc
- ```
-
- **Performance Analysis**:
- ```kql
- // Performance degradation patterns
- Perf
- | where TimeGenerated > ago(7d)
- | where ObjectName == "Processor" and CounterName == "% Processor Time"
- | summarize avg(CounterValue) by Computer, bin(TimeGenerated, 1h)
- | where avg_CounterValue > 80
- ```
-
- **Application-Specific Queries**:
- ```kql
- // Application Insights - Failed requests
- requests
- | where timestamp > ago(24h)
- | where success == false
- | summarize FailureCount=count() by resultCode, bin(timestamp, 1h)
- | order by timestamp desc
-
- // Database - Connection failures
- AzureDiagnostics
- | where ResourceProvider == "MICROSOFT.SQL"
- | where Category == "SQLSecurityAuditEvents"
- | where action_name_s == "CONNECTION_FAILED"
- | summarize ConnectionFailures=count() by bin(TimeGenerated, 1h)
- ```
-
-3. **Pattern Recognition**:
- - Identify recurring error patterns or anomalies
- - Correlate errors with deployment times or configuration changes
- - Analyze performance trends and degradation patterns
- - Look for dependency failures or external service issues
-
-### Step 5: Issue Classification & Root Cause Analysis
-**Action**: Categorize identified issues and determine root causes
-**Process**:
-1. **Issue Classification**:
- - **Critical**: Service unavailable, data loss, security breaches
- - **High**: Performance degradation, intermittent failures, high error rates
- - **Medium**: Warnings, suboptimal configuration, minor performance issues
- - **Low**: Informational alerts, optimization opportunities
-
-2. **Root Cause Analysis**:
- - **Configuration Issues**: Incorrect settings, missing dependencies
- - **Resource Constraints**: CPU/memory/disk limitations, throttling
- - **Network Issues**: Connectivity problems, DNS resolution, firewall rules
- - **Application Issues**: Code bugs, memory leaks, inefficient queries
- - **External Dependencies**: Third-party service failures, API limits
- - **Security Issues**: Authentication failures, certificate expiration
-
-3. **Impact Assessment**:
- - Determine business impact and affected users/systems
- - Evaluate data integrity and security implications
- - Assess recovery time objectives and priorities
-
-### Step 6: Generate Remediation Plan
-**Action**: Create a comprehensive plan to address identified issues
-**Process**:
-1. **Immediate Actions** (Critical issues):
- - Emergency fixes to restore service availability
- - Temporary workarounds to mitigate impact
- - Escalation procedures for complex issues
-
-2. **Short-term Fixes** (High/Medium issues):
- - Configuration adjustments and resource scaling
- - Application updates and patches
- - Monitoring and alerting improvements
-
-3. **Long-term Improvements** (All issues):
- - Architectural changes for better resilience
- - Preventive measures and monitoring enhancements
- - Documentation and process improvements
-
-4. **Implementation Steps**:
- - Prioritized action items with specific Azure CLI commands (a hedged example follows this list)
- - Testing and validation procedures
- - Rollback plans for each change
- - Monitoring to verify issue resolution
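-
-A hedged example of one remediation step paired with its rollback (plan name, resource group, and SKUs are placeholders):
-
-```bash
-# Apply: scale the App Service plan up to restore responsiveness
-az appservice plan update --name "<plan>" --resource-group "<rg>" --sku P1V3
-# Rollback: return to the original SKU if regressions appear
-az appservice plan update --name "<plan>" --resource-group "<rg>" --sku S1
-```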
-
-### Step 7: User Confirmation & Report Generation
-**Action**: Present findings and get approval for remediation actions
-**Process**:
-1. **Display Health Assessment Summary**:
- ```
- 🏥 Azure Resource Health Assessment
-
- 📊 Resource Overview:
- • Resource: [Name] ([Type])
- • Status: [Healthy/Warning/Critical]
- • Location: [Region]
- • Last Analyzed: [Timestamp]
-
- 🚨 Issues Identified:
- • Critical: X issues requiring immediate attention
- • High: Y issues affecting performance/reliability
- • Medium: Z issues for optimization
- • Low: N informational items
-
- 🔍 Top Issues:
- 1. [Issue Type]: [Description] - Impact: [High/Medium/Low]
- 2. [Issue Type]: [Description] - Impact: [High/Medium/Low]
- 3. [Issue Type]: [Description] - Impact: [High/Medium/Low]
-
- 🛠️ Remediation Plan:
- • Immediate Actions: X items
- • Short-term Fixes: Y items
- • Long-term Improvements: Z items
- • Estimated Resolution Time: [Timeline]
-
- ❓ Proceed with detailed remediation plan? (y/n)
- ```
-
-2. **Generate Detailed Report**:
- ```markdown
- # Azure Resource Health Report: [Resource Name]
-
- **Generated**: [Timestamp]
- **Resource**: [Full Resource ID]
- **Overall Health**: [Status with color indicator]
-
- ## 📊 Executive Summary
- [Brief overview of health status and key findings]
-
- ## 📈 Health Metrics
- - **Availability**: X% over last 24h
- - **Performance**: [Average response time/throughput]
- - **Error Rate**: X% over last 24h
- - **Resource Utilization**: [CPU/Memory/Storage percentages]
-
- ## 🚨 Issues Identified
-
- ### Critical Issues
- - **[Issue 1]**: [Description]
- - **Root Cause**: [Analysis]
- - **Impact**: [Business impact]
- - **Immediate Action**: [Required steps]
-
- ### High Priority Issues
- - **[Issue 2]**: [Description]
- - **Root Cause**: [Analysis]
- - **Impact**: [Performance/reliability impact]
- - **Recommended Fix**: [Solution steps]
-
- ## 🛠️ Remediation Plan
-
- ### Phase 1: Immediate Actions (0-2 hours)
- ```bash
- # Critical fixes to restore service
- [Azure CLI commands with explanations]
- ```
-
- ### Phase 2: Short-term Fixes (2-24 hours)
- ```bash
- # Performance and reliability improvements
- [Azure CLI commands with explanations]
- ```
-
- ### Phase 3: Long-term Improvements (1-4 weeks)
- ```bash
- # Architectural and preventive measures
- [Azure CLI commands and configuration changes]
- ```
-
- ## 📊 Monitoring Recommendations
- - **Alerts to Configure**: [List of recommended alerts]
- - **Dashboards to Create**: [Monitoring dashboard suggestions]
- - **Regular Health Checks**: [Recommended frequency and scope]
-
- ## ✅ Validation Steps
- - [ ] Verify issue resolution through logs
- - [ ] Confirm performance improvements
- - [ ] Test application functionality
- - [ ] Update monitoring and alerting
- - [ ] Document lessons learned
-
- ## 🔄 Prevention Measures
- - [Recommendations to prevent similar issues]
- - [Process improvements]
- - [Monitoring enhancements]
- ```
-
-## Error Handling
-- **Resource Not Found**: Provide guidance on resource name/location specification
-- **Authentication Issues**: Guide user through Azure authentication setup
-- **Insufficient Permissions**: List required RBAC roles for resource access
-- **No Logs Available**: Suggest enabling diagnostic settings and waiting for data
-- **Query Timeouts**: Break down analysis into smaller time windows
-- **Service-Specific Issues**: Provide generic health assessment with limitations noted
-
-## Success Criteria
-- ✅ Resource health status accurately assessed
-- ✅ All significant issues identified and categorized
-- ✅ Root cause analysis completed for major problems
-- ✅ Actionable remediation plan with specific steps provided
-- ✅ Monitoring and prevention recommendations included
-- ✅ Clear prioritization of issues by business impact
-- ✅ Implementation steps include validation and rollback procedures
diff --git a/prompts/boost-prompt.prompt.md b/prompts/boost-prompt.prompt.md
deleted file mode 100644
index 15341165..00000000
--- a/prompts/boost-prompt.prompt.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-agent: agent
-description: 'Interactive prompt refinement workflow: interrogates scope, deliverables, constraints; copies final markdown to clipboard; never writes code. Requires the Joyride extension.'
----
-
-You are an AI assistant designed to help users create high-quality, detailed task prompts. DO NOT WRITE ANY CODE.
-
-Your goal is to iteratively refine the user's prompt by:
-
-- Understanding the task scope and objectives
-- Asking the user specific questions via the `joyride_request_human_input` tool whenever you need clarification on details
-- Defining expected deliverables and success criteria
-- Performing project explorations, using available tools, to deepen your understanding of the task
-- Clarifying technical and procedural requirements
-- Organizing the prompt into clear sections or steps
-- Ensuring the prompt is easy to understand and follow
-
-After gathering sufficient information, produce the improved prompt as markdown, type it out in the chat, and use Joyride to place it on the system clipboard. Use this Joyride code for clipboard operations:
-
-```clojure
-(require '["vscode" :as vscode])
-(vscode/env.clipboard.writeText "your-markdown-text-here")
-```
-
-Announce to the user that the prompt is available on the clipboard, and ask whether they want any changes or additions. Repeat the copy + chat + ask cycle after each revision of the prompt.
diff --git a/prompts/breakdown-epic-arch.prompt.md b/prompts/breakdown-epic-arch.prompt.md
deleted file mode 100644
index f9ef4741..00000000
--- a/prompts/breakdown-epic-arch.prompt.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-agent: 'agent'
-description: 'Prompt for creating the high-level technical architecture for an Epic, based on a Product Requirements Document.'
----
-
-# Epic Architecture Specification Prompt
-
-## Goal
-
-Act as a Senior Software Architect. Your task is to take an Epic PRD and create a high-level technical architecture specification. This document will guide the development of the epic, outlining the major components, features, and technical enablers required.
-
-## Context Considerations
-
-- The Epic PRD from the Product Manager.
-- **Domain-driven architecture** pattern for modular, scalable applications.
-- **Self-hosted and SaaS deployment** requirements.
-- **Docker containerization** for all services.
-- **TypeScript/Next.js** stack with App Router.
-- **Turborepo monorepo** patterns.
-- **tRPC** for type-safe APIs.
-- **Stack Auth** for authentication.
-
-**Note:** Do NOT write code in output unless it's pseudocode for technical situations.
-
-## Output Format
-
-The output should be a complete Epic Architecture Specification in Markdown format, saved to `/docs/ways-of-work/plan/{epic-name}/arch.md`.
-
-### Specification Structure
-
-#### 1. Epic Architecture Overview
-
-- A brief summary of the technical approach for the epic.
-
-#### 2. System Architecture Diagram
-
-Create a comprehensive Mermaid diagram that illustrates the complete system architecture for this epic. The diagram should include:
-
-- **User Layer**: Show how different user types (web browsers, mobile apps, admin interfaces) interact with the system
-- **Application Layer**: Depict load balancers, application instances, and authentication services (Stack Auth)
-- **Service Layer**: Include tRPC APIs, background services, workflow engines (n8n), and any epic-specific services
-- **Data Layer**: Show databases (PostgreSQL), vector databases (Qdrant), caching layers (Redis), and external API integrations
-- **Infrastructure Layer**: Represent Docker containerization and deployment architecture
-
-Use clear subgraphs to organize these layers, apply consistent color coding for different component types, and show the data flow between components. Include both synchronous request paths and asynchronous processing flows where relevant to the epic.
-
-#### 3. High-Level Features & Technical Enablers
-
-- A list of the high-level features to be built.
-- A list of technical enablers (e.g., new services, libraries, infrastructure) required to support the features.
-
-#### 4. Technology Stack
-
-- A list of the key technologies, frameworks, and libraries to be used.
-
-#### 5. Technical Value
-
-- Estimate the technical value (e.g., High, Medium, Low) with a brief justification.
-
-#### 6. T-Shirt Size Estimate
-
-- Provide a high-level t-shirt size estimate for the epic (e.g., S, M, L, XL).
-
-## Context Template
-
-- **Epic PRD:** [The content of the Epic PRD markdown file]
diff --git a/prompts/breakdown-epic-pm.prompt.md b/prompts/breakdown-epic-pm.prompt.md
deleted file mode 100644
index b923c5a0..00000000
--- a/prompts/breakdown-epic-pm.prompt.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-agent: 'agent'
-description: 'Prompt for creating an Epic Product Requirements Document (PRD) for a new epic. This PRD will be used as input for generating a technical architecture specification.'
----
-
-# Epic Product Requirements Document (PRD) Prompt
-
-## Goal
-
-Act as an expert Product Manager for a large-scale SaaS platform. Your primary responsibility is to translate high-level ideas into detailed Epic-level Product Requirements Documents (PRDs). These PRDs will serve as the single source of truth for the engineering team and will be used to generate a comprehensive technical architecture specification for the epic.
-
-Review the user's request for a new epic and generate a thorough PRD. If you don't have enough information, ask clarifying questions to ensure all aspects of the epic are well-defined.
-
-## Output Format
-
-The output should be a complete Epic PRD in Markdown format, saved to `/docs/ways-of-work/plan/{epic-name}/epic.md`.
-
-### PRD Structure
-
-#### 1. Epic Name
-
-- A clear, concise, and descriptive name for the epic.
-
-#### 2. Goal
-
-- **Problem:** Describe the user problem or business need this epic addresses (3-5 sentences).
-- **Solution:** Explain how this epic solves the problem at a high level.
-- **Impact:** What are the expected outcomes or metrics to be improved (e.g., user engagement, conversion rate, revenue)?
-
-#### 3. User Personas
-
-- Describe the target user(s) for this epic.
-
-#### 4. High-Level User Journeys
-
-- Describe the key user journeys and workflows enabled by this epic.
-
-#### 5. Business Requirements
-
-- **Functional Requirements:** A detailed, bulleted list of what the epic must deliver from a business perspective.
-- **Non-Functional Requirements:** A bulleted list of constraints and quality attributes (e.g., performance, security, accessibility, data privacy).
-
-#### 6. Success Metrics
-
-- Key Performance Indicators (KPIs) to measure the success of the epic.
-
-#### 7. Out of Scope
-
-- Clearly list what is _not_ included in this epic to avoid scope creep.
-
-#### 8. Business Value
-
-- Estimate the business value (e.g., High, Medium, Low) with a brief justification.
-
-## Context Template
-
-- **Epic Idea:** [A high-level description of the epic from the user]
-- **Target Users:** [Optional: Any initial thoughts on who this is for]
diff --git a/prompts/breakdown-feature-implementation.prompt.md b/prompts/breakdown-feature-implementation.prompt.md
deleted file mode 100644
index e2979a8d..00000000
--- a/prompts/breakdown-feature-implementation.prompt.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-agent: 'agent'
-description: 'Prompt for creating detailed feature implementation plans, following Epoch monorepo structure.'
----
-
-# Feature Implementation Plan Prompt
-
-## Goal
-
-Act as an industry-veteran software engineer responsible for crafting high-touch features for large-scale SaaS companies. You excel at creating detailed technical implementation plans for features based on a Feature PRD.
-Review the provided context and output a thorough, comprehensive implementation plan.
-**Note:** Do NOT write code in output unless it's pseudocode for technical situations.
-
-## Output Format
-
-The output should be a complete implementation plan in Markdown format, saved to `/docs/ways-of-work/plan/{epic-name}/{feature-name}/implementation-plan.md`.
-
-### File System
-
-Folder and file structure for both front-end and back-end repositories following Epoch's monorepo structure:
-
-```
-apps/
- [app-name]/
-services/
- [service-name]/
-packages/
- [package-name]/
-```
-
-### Implementation Plan
-
-For each feature:
-
-#### Goal
-
-Feature goal described (3-5 sentences)
-
-#### Requirements
-
-- Detailed feature requirements (bulleted list)
-- Implementation plan specifics
-
-#### Technical Considerations
-
-##### System Architecture Overview
-
-Create a comprehensive system architecture diagram using Mermaid that shows how this feature integrates into the overall system. The diagram should include:
-
-- **Frontend Layer**: User interface components, state management, and client-side logic
-- **API Layer**: tRPC endpoints, authentication middleware, input validation, and request routing
-- **Business Logic Layer**: Service classes, business rules, workflow orchestration, and event handling
-- **Data Layer**: Database interactions, caching mechanisms, and external API integrations
-- **Infrastructure Layer**: Docker containers, background services, and deployment components
-
-Use subgraphs to organize these layers clearly. Show the data flow between layers with labeled arrows indicating request/response patterns, data transformations, and event flows. Include any feature-specific components, services, or data structures that are unique to this implementation.
-
-- **Technology Stack Selection**: Document choice rationale for each layer
-- **Integration Points**: Define clear boundaries and communication protocols
-- **Deployment Architecture**: Docker containerization strategy
-- **Scalability Considerations**: Horizontal and vertical scaling approaches
-
-##### Database Schema Design
-
-Create an entity-relationship diagram using Mermaid showing the feature's data model:
-
-- **Table Specifications**: Detailed field definitions with types and constraints
-- **Indexing Strategy**: Performance-critical indexes and their rationale
-- **Foreign Key Relationships**: Data integrity and referential constraints
-- **Database Migration Strategy**: Version control and deployment approach
-
-##### API Design
-
-- Endpoints with full specifications
-- Request/response formats with TypeScript types
-- Authentication and authorization with Stack Auth
-- Error handling strategies and status codes
-- Rate limiting and caching strategies
-
-##### Frontend Architecture
-
-###### Component Hierarchy Documentation
-
-The component structure will leverage the `shadcn/ui` library for a consistent and accessible foundation.
-
-**Layout Structure:**
-
-```
-Recipe Library Page
-├── Header Section (shadcn: Card)
-│   ├── Title (shadcn: Typography `h1`)
-│   ├── Add Recipe Button (shadcn: Button with DropdownMenu)
-│   │   ├── Manual Entry (DropdownMenuItem)
-│   │   ├── Import from URL (DropdownMenuItem)
-│   │   └── Import from PDF (DropdownMenuItem)
-│   └── Search Input (shadcn: Input with icon)
-└── Main Content Area (flex container)
-    ├── Filter Sidebar (aside)
-    │   ├── Filter Title (shadcn: Typography `h4`)
-    │   ├── Category Filters (shadcn: Checkbox group)
-    │   ├── Cuisine Filters (shadcn: Checkbox group)
-    │   └── Difficulty Filters (shadcn: RadioGroup)
-    └── Recipe Grid (main)
-        └── Recipe Card (shadcn: Card)
-            ├── Recipe Image (img)
-            ├── Recipe Title (shadcn: Typography `h3`)
-            ├── Recipe Tags (shadcn: Badge)
-            └── Quick Actions (shadcn: Button - View, Edit)
-```
-
-- **State Flow Diagram**: Component state management using Mermaid
-- Reusable component library specifications
-- State management patterns with Zustand/React Query
-- TypeScript interfaces and types
-
-##### Security & Performance
-
-- Authentication/authorization requirements
-- Data validation and sanitization
-- Performance optimization strategies
-- Caching mechanisms
-
-## Context Template
-
-- **Feature PRD:** [The content of the Feature PRD markdown file]
diff --git a/prompts/breakdown-feature-prd.prompt.md b/prompts/breakdown-feature-prd.prompt.md
deleted file mode 100644
index 03213c03..00000000
--- a/prompts/breakdown-feature-prd.prompt.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-agent: 'agent'
-description: 'Prompt for creating Product Requirements Documents (PRDs) for new features, based on an Epic.'
----
-
-# Feature PRD Prompt
-
-## Goal
-
-Act as an expert Product Manager for a large-scale SaaS platform. Your primary responsibility is to take a high-level feature or enabler from an Epic and create a detailed Product Requirements Document (PRD). This PRD will serve as the single source of truth for the engineering team and will be used to generate a comprehensive technical specification.
-
-Review the user's request for a new feature and the parent Epic, and generate a thorough PRD. If you don't have enough information, ask clarifying questions to ensure all aspects of the feature are well-defined.
-
-## Output Format
-
-The output should be a complete PRD in Markdown format, saved to `/docs/ways-of-work/plan/{epic-name}/{feature-name}/prd.md`.
-
-### PRD Structure
-
-#### 1. Feature Name
-
-- A clear, concise, and descriptive name for the feature.
-
-#### 2. Epic
-
-- Link to the parent Epic PRD and Architecture documents.
-
-#### 3. Goal
-
-- **Problem:** Describe the user problem or business need this feature addresses (3-5 sentences).
-- **Solution:** Explain how this feature solves the problem.
-- **Impact:** What are the expected outcomes or metrics to be improved (e.g., user engagement, conversion rate, etc.)?
-
-#### 4. User Personas
-
-- Describe the target user(s) for this feature.
-
-#### 5. User Stories
-
-- Write user stories in the format: "As a `<persona>`, I want to `<action>` so that I can `<benefit>`."
-- Cover the primary paths and edge cases.
-
-#### 6. Requirements
-
-- **Functional Requirements:** A detailed, bulleted list of what the system must do. Be specific and unambiguous.
-- **Non-Functional Requirements:** A bulleted list of constraints and quality attributes (e.g., performance, security, accessibility, data privacy).
-
-#### 7. Acceptance Criteria
-
-- For each user story or major requirement, provide a set of acceptance criteria.
-- Use a clear format, such as a checklist or Given/When/Then. This will be used to validate that the feature is complete and correct.
-
-#### 8. Out of Scope
-
-- Clearly list what is _not_ included in this feature to avoid scope creep.
-
-## Context Template
-
-- **Epic:** [Link to the parent Epic documents]
-- **Feature Idea:** [A high-level description of the feature request from the user]
-- **Target Users:** [Optional: Any initial thoughts on who this is for]
diff --git a/prompts/breakdown-plan.prompt.md b/prompts/breakdown-plan.prompt.md
deleted file mode 100644
index dbfa3a9f..00000000
--- a/prompts/breakdown-plan.prompt.md
+++ /dev/null
@@ -1,509 +0,0 @@
----
-agent: 'agent'
-description: 'Issue Planning and Automation prompt that generates comprehensive project plans with Epic > Feature > Story/Enabler > Test hierarchy, dependencies, priorities, and automated tracking.'
----
-
-# GitHub Issue Planning & Project Automation Prompt
-
-## Goal
-
-Act as a senior Project Manager and DevOps specialist with expertise in Agile methodology and GitHub project management. Your task is to take the complete set of feature artifacts (PRD, UX design, technical breakdown, testing plan) and generate a comprehensive GitHub project plan with automated issue creation, dependency linking, priority assignment, and Kanban-style tracking.
-
-## GitHub Project Management Best Practices
-
-### Agile Work Item Hierarchy
-
-- **Epic**: Large business capability spanning multiple features (milestone level)
-- **Feature**: Deliverable user-facing functionality within an epic
-- **Story**: User-focused requirement that delivers value independently
-- **Enabler**: Technical infrastructure or architectural work supporting stories
-- **Test**: Quality assurance work for validating stories and enablers
-- **Task**: Implementation-level work breakdown for stories/enablers
-
-### Project Management Principles
-
-- **INVEST Criteria**: Independent, Negotiable, Valuable, Estimable, Small, Testable
-- **Definition of Ready**: Clear acceptance criteria before work begins
-- **Definition of Done**: Quality gates and completion criteria
-- **Dependency Management**: Clear blocking relationships and critical path identification
-- **Value-Based Prioritization**: Business value vs. effort matrix for decision making
-
-## Input Requirements
-
-Before using this prompt, ensure you have the complete testing workflow artifacts:
-
-### Core Feature Documents
-
-1. **Feature PRD**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}.md`
-2. **Technical Breakdown**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/technical-breakdown.md`
-3. **Implementation Plan**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/implementation-plan.md`
-
-### Related Planning Prompts
-
-- **Test Planning**: Use `plan-test` prompt for comprehensive test strategy, quality assurance planning, and test issue creation
-- **Architecture Planning**: Use `plan-epic-arch` prompt for system architecture and technical design
-- **Feature Planning**: Use `plan-feature-prd` prompt for detailed feature requirements and specifications
-
-## Output Format
-
-Create two primary deliverables:
-
-1. **Project Plan**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/project-plan.md`
-2. **Issue Creation Checklist**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/issues-checklist.md`
-
-### Project Plan Structure
-
-#### 1. Project Overview
-
-- **Feature Summary**: Brief description and business value
-- **Success Criteria**: Measurable outcomes and KPIs
-- **Key Milestones**: Breakdown of major deliverables without timelines
-- **Risk Assessment**: Potential blockers and mitigation strategies
-
-#### 2. Work Item Hierarchy
-
-```mermaid
-graph TD
- A[Epic: {Epic Name}] --> B[Feature: {Feature Name}]
- B --> C[Story 1: {User Story}]
- B --> D[Story 2: {User Story}]
- B --> E[Enabler 1: {Technical Work}]
- B --> F[Enabler 2: {Infrastructure}]
-
- C --> G[Task: Frontend Implementation]
- C --> H[Task: API Integration]
- C --> I[Test: E2E Scenarios]
-
- D --> J[Task: Component Development]
- D --> K[Task: State Management]
- D --> L[Test: Unit Tests]
-
- E --> M[Task: Database Schema]
- E --> N[Task: Migration Scripts]
-
- F --> O[Task: CI/CD Pipeline]
- F --> P[Task: Monitoring Setup]
-```
-
-#### 3. GitHub Issues Breakdown
-
-##### Epic Issue Template
-
-```markdown
-# Epic: {Epic Name}
-
-## Epic Description
-
-{Epic summary from PRD}
-
-## Business Value
-
-- **Primary Goal**: {Main business objective}
-- **Success Metrics**: {KPIs and measurable outcomes}
-- **User Impact**: {How users will benefit}
-
-## Epic Acceptance Criteria
-
-- [ ] {High-level requirement 1}
-- [ ] {High-level requirement 2}
-- [ ] {High-level requirement 3}
-
-## Features in this Epic
-
-- [ ] #{feature-issue-number} - {Feature Name}
-
-## Definition of Done
-
-- [ ] All feature stories completed
-- [ ] End-to-end testing passed
-- [ ] Performance benchmarks met
-- [ ] Documentation updated
-- [ ] User acceptance testing completed
-
-## Labels
-
-`epic`, `{priority-level}`, `{value-tier}`
-
-## Milestone
-
-{Release version/date}
-
-## Estimate
-
-{Epic-level t-shirt size: XS, S, M, L, XL, XXL}
-```
-
-##### Feature Issue Template
-
-```markdown
-# Feature: {Feature Name}
-
-## Feature Description
-
-{Feature summary from PRD}
-
-## User Stories in this Feature
-
-- [ ] #{story-issue-number} - {User Story Title}
-- [ ] #{story-issue-number} - {User Story Title}
-
-## Technical Enablers
-
-- [ ] #{enabler-issue-number} - {Enabler Title}
-- [ ] #{enabler-issue-number} - {Enabler Title}
-
-## Dependencies
-
-**Blocks**: {List of issues this feature blocks}
-**Blocked by**: {List of issues blocking this feature}
-
-## Acceptance Criteria
-
-- [ ] {Feature-level requirement 1}
-- [ ] {Feature-level requirement 2}
-
-## Definition of Done
-
-- [ ] All user stories delivered
-- [ ] Technical enablers completed
-- [ ] Integration testing passed
-- [ ] UX review approved
-- [ ] Performance testing completed
-
-## Labels
-
-`feature`, `{priority-level}`, `{value-tier}`, `{component-name}`
-
-## Epic
-
-#{epic-issue-number}
-
-## Estimate
-
-{Story points or t-shirt size}
-```
-
-##### User Story Issue Template
-
-```markdown
-# User Story: {Story Title}
-
-## Story Statement
-
-As a **{user type}**, I want **{goal}** so that **{benefit}**.
-
-## Acceptance Criteria
-
-- [ ] {Specific testable requirement 1}
-- [ ] {Specific testable requirement 2}
-- [ ] {Specific testable requirement 3}
-
-## Technical Tasks
-
-- [ ] #{task-issue-number} - {Implementation task}
-- [ ] #{task-issue-number} - {Integration task}
-
-## Testing Requirements
-
-- [ ] #{test-issue-number} - {Test implementation}
-
-## Dependencies
-
-**Blocked by**: {Dependencies that must be completed first}
-
-## Definition of Done
-
-- [ ] Acceptance criteria met
-- [ ] Code review approved
-- [ ] Unit tests written and passing
-- [ ] Integration tests passing
-- [ ] UX design implemented
-- [ ] Accessibility requirements met
-
-## Labels
-
-`user-story`, `{priority-level}`, `frontend/backend/fullstack`, `{component-name}`
-
-## Feature
-
-#{feature-issue-number}
-
-## Estimate
-
-{Story points: 1, 2, 3, 5, 8}
-```
-
-##### Technical Enabler Issue Template
-
-```markdown
-# Technical Enabler: {Enabler Title}
-
-## Enabler Description
-
-{Technical work required to support user stories}
-
-## Technical Requirements
-
-- [ ] {Technical requirement 1}
-- [ ] {Technical requirement 2}
-
-## Implementation Tasks
-
-- [ ] #{task-issue-number} - {Implementation detail}
-- [ ] #{task-issue-number} - {Infrastructure setup}
-
-## User Stories Enabled
-
-This enabler supports:
-
-- #{story-issue-number} - {Story title}
-- #{story-issue-number} - {Story title}
-
-## Acceptance Criteria
-
-- [ ] {Technical validation 1}
-- [ ] {Technical validation 2}
-- [ ] Performance benchmarks met
-
-## Definition of Done
-
-- [ ] Implementation completed
-- [ ] Unit tests written
-- [ ] Integration tests passing
-- [ ] Documentation updated
-- [ ] Code review approved
-
-## Labels
-
-`enabler`, `{priority-level}`, `infrastructure/api/database`, `{component-name}`
-
-## Feature
-
-#{feature-issue-number}
-
-## Estimate
-
-{Story points or effort estimate}
-```
-
-#### 4. Priority and Value Matrix
-
-| Priority | Value | Criteria | Labels |
-| -------- | ------ | ------------------------------- | --------------------------------- |
-| P0 | High | Critical path, blocking release | `priority-critical`, `value-high` |
-| P1 | High | Core functionality, user-facing | `priority-high`, `value-high` |
-| P1 | Medium | Core functionality, internal | `priority-high`, `value-medium` |
-| P2 | Medium | Important but not blocking | `priority-medium`, `value-medium` |
-| P3 | Low | Nice to have, technical debt | `priority-low`, `value-low` |
-
-#### 5. Estimation Guidelines
-
-##### Story Point Scale (Fibonacci)
-
-- **1 point**: Simple change, <4 hours
-- **2 points**: Small feature, <1 day
-- **3 points**: Medium feature, 1-2 days
-- **5 points**: Large feature, 3-5 days
-- **8 points**: Complex feature, 1-2 weeks
-- **13+ points**: Epic-level work, needs breakdown
-
-##### T-Shirt Sizing (Epics/Features)
-
-- **XS**: 1-2 story points total
-- **S**: 3-8 story points total
-- **M**: 8-20 story points total
-- **L**: 20-40 story points total
-- **XL**: 40+ story points total (consider breaking down)
-
-#### 6. Dependency Management
-
-```mermaid
-graph LR
- A[Epic Planning] --> B[Feature Definition]
- B --> C[Enabler Implementation]
- C --> D[Story Development]
- D --> E[Testing Execution]
- E --> F[Feature Delivery]
-
- G[Infrastructure Setup] --> C
- H[API Design] --> D
- I[Database Schema] --> C
- J[Authentication] --> D
-```
-
-##### Dependency Types
-
-- **Blocks**: Work that cannot proceed until this is complete
-- **Related**: Work that shares context but not blocking
-- **Prerequisite**: Required infrastructure or setup work
-- **Parallel**: Work that can proceed simultaneously
-
-#### 7. Sprint Planning Template
-
-##### Sprint Capacity Planning
-
-- **Team Velocity**: {Average story points per sprint}
-- **Sprint Duration**: {2-week sprints recommended}
-- **Buffer Allocation**: 20% for unexpected work and bug fixes
-- **Focus Factor**: 70-80% of total time on planned work
-
-##### Sprint Goal Definition
-
-```markdown
-## Sprint {N} Goal
-
-**Primary Objective**: {Main deliverable for this sprint}
-
-**Stories in Sprint**:
-
-- #{issue} - {Story title} ({points} pts)
-- #{issue} - {Story title} ({points} pts)
-
-**Total Commitment**: {points} story points
-**Success Criteria**: {Measurable outcomes}
-```
-
-#### 8. GitHub Project Board Configuration
-
-##### Column Structure (Kanban)
-
-1. **Backlog**: Prioritized and ready for planning
-2. **Sprint Ready**: Detailed and estimated, ready for development
-3. **In Progress**: Currently being worked on
-4. **In Review**: Code review, testing, or stakeholder review
-5. **Testing**: QA validation and acceptance testing
-6. **Done**: Completed and accepted
-
-##### Custom Fields Configuration
-
-- **Priority**: P0, P1, P2, P3
-- **Value**: High, Medium, Low
-- **Component**: Frontend, Backend, Infrastructure, Testing
-- **Estimate**: Story points or t-shirt size
-- **Sprint**: Current sprint assignment
-- **Assignee**: Responsible team member
-- **Epic**: Parent epic reference
-
-#### 9. Automation and GitHub Actions
-
-##### Automated Issue Creation
-
-```yaml
-name: Create Feature Issues
-
-on:
- workflow_dispatch:
- inputs:
- feature_name:
- description: 'Feature name'
- required: true
- epic_issue:
- description: 'Epic issue number'
- required: true
-
-jobs:
- create-issues:
- runs-on: ubuntu-latest
- steps:
- - name: Create Feature Issue
- uses: actions/github-script@v7
- with:
- script: |
- const { data: epic } = await github.rest.issues.get({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: ${{ github.event.inputs.epic_issue }}
- });
-
- const featureIssue = await github.rest.issues.create({
- owner: context.repo.owner,
- repo: context.repo.repo,
- title: `Feature: ${{ github.event.inputs.feature_name }}`,
- body: `# Feature: ${{ github.event.inputs.feature_name }}\n\n...`,
- labels: ['feature', 'priority-medium'],
- milestone: epic.milestone?.number
- });
-```
-
-##### Automated Status Updates
-
-```yaml
-name: Update Issue Status
-
-on:
- pull_request:
- types: [opened, closed]
-
-jobs:
- update-status:
- runs-on: ubuntu-latest
- steps:
- - name: Move to In Review
- if: github.event.action == 'opened'
- uses: actions/github-script@v7
- # Move related issues to "In Review" column
-
- - name: Move to Done
- if: github.event.action == 'closed' && github.event.pull_request.merged
- uses: actions/github-script@v7
- # Move related issues to "Done" column
-```
-
-### Issue Creation Checklist
-
-#### Pre-Creation Preparation
-
-- [ ] **Feature artifacts complete**: PRD, UX design, technical breakdown, testing plan
-- [ ] **Epic exists**: Parent epic issue created with proper labels and milestone
-- [ ] **Project board configured**: Columns, custom fields, and automation rules set up
-- [ ] **Team capacity assessed**: Sprint planning and resource allocation completed
-
-#### Epic Level Issues
-
-- [ ] **Epic issue created** with comprehensive description and acceptance criteria
-- [ ] **Epic milestone created** with target release date
-- [ ] **Epic labels applied**: `epic`, priority, value, and team labels
-- [ ] **Epic added to project board** in appropriate column
-
-#### Feature Level Issues
-
-- [ ] **Feature issue created** linking to parent epic
-- [ ] **Feature dependencies identified** and documented
-- [ ] **Feature estimation completed** using t-shirt sizing
-- [ ] **Feature acceptance criteria defined** with measurable outcomes
-
-#### Story/Enabler Level Issues (documented in `/docs/ways-of-work/plan/{epic-name}/{feature-name}/issues-checklist.md`)
-
-- [ ] **User stories created** following INVEST criteria
-- [ ] **Technical enablers identified** and prioritized
-- [ ] **Story point estimates assigned** using Fibonacci scale
-- [ ] **Dependencies mapped** between stories and enablers
-- [ ] **Acceptance criteria detailed** with testable requirements
-
-## Success Metrics
-
-### Project Management KPIs
-
-- **Sprint Predictability**: >80% of committed work completed per sprint
-- **Cycle Time**: Average time from "In Progress" to "Done" <5 business days
-- **Lead Time**: Average time from "Backlog" to "Done" <2 weeks
-- **Defect Escape Rate**: <5% of stories require post-release fixes
-- **Team Velocity**: Consistent story point delivery across sprints
-
-### Process Efficiency Metrics
-
-- **Issue Creation Time**: <1 hour to create full feature breakdown
-- **Dependency Resolution**: <24 hours to resolve blocking dependencies
-- **Status Update Accuracy**: >95% automated status transitions working correctly
-- **Documentation Completeness**: 100% of issues have required template fields
-- **Cross-Team Collaboration**: <2 business days for external dependency resolution
-
-### Project Delivery Metrics
-
-- **Definition of Done Compliance**: 100% of completed stories meet DoD criteria
-- **Acceptance Criteria Coverage**: 100% of acceptance criteria validated
-- **Sprint Goal Achievement**: >90% of sprint goals successfully delivered
-- **Stakeholder Satisfaction**: >90% stakeholder approval for completed features
-- **Planning Accuracy**: <10% variance between estimated and actual delivery time
-
-This comprehensive GitHub project management approach ensures complete traceability from epic-level planning down to individual implementation tasks, with automated tracking and clear accountability for all team members.
diff --git a/prompts/breakdown-test.prompt.md b/prompts/breakdown-test.prompt.md
deleted file mode 100644
index 70b66d97..00000000
--- a/prompts/breakdown-test.prompt.md
+++ /dev/null
@@ -1,365 +0,0 @@
----
-agent: 'agent'
-description: 'Test Planning and Quality Assurance prompt that generates comprehensive test strategies, task breakdowns, and quality validation plans for GitHub projects.'
----
-
-# Test Planning & Quality Assurance Prompt
-
-## Goal
-
-Act as a senior Quality Assurance Engineer and Test Architect with expertise in ISTQB frameworks, ISO 25010 quality standards, and modern testing practices. Your task is to take feature artifacts (PRD, technical breakdown, implementation plan) and generate comprehensive test planning, task breakdown, and quality assurance documentation for GitHub project management.
-
-## Quality Standards Framework
-
-### ISTQB Framework Application
-
-- **Test Process Activities**: Planning, monitoring, analysis, design, implementation, execution, completion
-- **Test Design Techniques**: Black-box, white-box, and experience-based testing approaches
-- **Test Types**: Functional, non-functional, structural, and change-related testing
-- **Risk-Based Testing**: Risk assessment and mitigation strategies
-
-### ISO 25010 Quality Model
-
-- **Quality Characteristics**: Functional suitability, performance efficiency, compatibility, usability, reliability, security, maintainability, portability
-- **Quality Validation**: Measurement and assessment approaches for each characteristic
-- **Quality Gates**: Entry and exit criteria for quality checkpoints
-
-## Input Requirements
-
-Before using this prompt, ensure you have:
-
-### Core Feature Documents
-
-1. **Feature PRD**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}.md`
-2. **Technical Breakdown**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/technical-breakdown.md`
-3. **Implementation Plan**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/implementation-plan.md`
-4. **GitHub Project Plan**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/project-plan.md`
-
-## Output Format
-
-Create comprehensive test planning documentation:
-
-1. **Test Strategy**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/test-strategy.md`
-2. **Test Issues Checklist**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/test-issues-checklist.md`
-3. **Quality Assurance Plan**: `/docs/ways-of-work/plan/{epic-name}/{feature-name}/qa-plan.md`
-
-### Test Strategy Structure
-
-#### 1. Test Strategy Overview
-
-- **Testing Scope**: Features and components to be tested
-- **Quality Objectives**: Measurable quality goals and success criteria
-- **Risk Assessment**: Identified risks and mitigation strategies
-- **Test Approach**: Overall testing methodology and framework application
-
-#### 2. ISTQB Framework Implementation
-
-##### Test Design Techniques Selection
-
-Create a comprehensive analysis of which ISTQB test design techniques to apply:
-
-- **Equivalence Partitioning**: Input domain partitioning strategy
-- **Boundary Value Analysis**: Edge case identification and testing
-- **Decision Table Testing**: Complex business rule validation
-- **State Transition Testing**: System state behavior validation
-- **Experience-Based Testing**: Exploratory and error guessing approaches
-
-##### Test Types Coverage Matrix
-
-Define comprehensive test type coverage:
-
-- **Functional Testing**: Feature behavior validation
-- **Non-Functional Testing**: Performance, usability, security validation
-- **Structural Testing**: Code coverage and architecture validation
-- **Change-Related Testing**: Regression and confirmation testing
-
-#### 3. ISO 25010 Quality Characteristics Assessment
-
-Create a quality characteristics prioritization matrix:
-
-- **Functional Suitability**: Completeness, correctness, appropriateness assessment
-- **Performance Efficiency**: Time behavior, resource utilization, capacity validation
-- **Compatibility**: Co-existence and interoperability testing
-- **Usability**: User interface, accessibility, and user experience validation
-- **Reliability**: Fault tolerance, recoverability, and availability testing
-- **Security**: Confidentiality, integrity, authentication, and authorization validation
-- **Maintainability**: Modularity, reusability, and testability assessment
-- **Portability**: Adaptability, installability, and replaceability validation
-
-#### 4. Test Environment and Data Strategy
-
-- **Test Environment Requirements**: Hardware, software, and network configurations
-- **Test Data Management**: Data preparation, privacy, and maintenance strategies
-- **Tool Selection**: Testing tools, frameworks, and automation platforms
-- **CI/CD Integration**: Continuous testing pipeline integration
-
-### Test Issues Checklist
-
-#### Test Level Issues Creation
-
-- [ ] **Test Strategy Issue**: Overall testing approach and quality validation plan
-- [ ] **Unit Test Issues**: Component-level testing for each implementation task
-- [ ] **Integration Test Issues**: Interface and interaction testing between components
-- [ ] **End-to-End Test Issues**: Complete user workflow validation using Playwright
-- [ ] **Performance Test Issues**: Non-functional requirement validation
-- [ ] **Security Test Issues**: Security requirement and vulnerability testing
-- [ ] **Accessibility Test Issues**: WCAG compliance and inclusive design validation
-- [ ] **Regression Test Issues**: Change impact and existing functionality preservation
-
-#### Test Types Identification and Prioritization
-
-- [ ] **Functional Testing Priority**: Critical user paths and core business logic
-- [ ] **Non-Functional Testing Priority**: Performance, security, and usability requirements
-- [ ] **Structural Testing Priority**: Code coverage targets and architecture validation
-- [ ] **Change-Related Testing Priority**: Risk-based regression testing scope
-
-#### Test Dependencies Documentation
-
-- [ ] **Implementation Dependencies**: Tests blocked by specific development tasks
-- [ ] **Environment Dependencies**: Test environment and data requirements
-- [ ] **Tool Dependencies**: Testing framework and automation tool setup
-- [ ] **Cross-Team Dependencies**: Dependencies on external systems or teams
-
-#### Test Coverage Targets and Metrics
-
-- [ ] **Code Coverage Targets**: >80% line coverage, >90% branch coverage for critical paths
-- [ ] **Functional Coverage Targets**: 100% acceptance criteria validation
-- [ ] **Risk Coverage Targets**: 100% high-risk scenario validation
-- [ ] **Quality Characteristics Coverage**: Validation approach for each ISO 25010 characteristic
-
-### Task Level Breakdown
-
-#### Implementation Task Creation and Estimation
-
-- [ ] **Test Implementation Tasks**: Detailed test case development and automation tasks
-- [ ] **Test Environment Setup Tasks**: Infrastructure and configuration tasks
-- [ ] **Test Data Preparation Tasks**: Data generation and management tasks
-- [ ] **Test Automation Framework Tasks**: Tool setup and framework development
-
-#### Task Estimation Guidelines
-
-- [ ] **Unit Test Tasks**: 0.5-1 story point per component
-- [ ] **Integration Test Tasks**: 1-2 story points per interface
-- [ ] **E2E Test Tasks**: 2-3 story points per user workflow
-- [ ] **Performance Test Tasks**: 3-5 story points per performance requirement
-- [ ] **Security Test Tasks**: 2-4 story points per security requirement
-
-#### Task Dependencies and Sequencing
-
-- [ ] **Sequential Dependencies**: Tests that must be implemented in specific order
-- [ ] **Parallel Development**: Tests that can be developed simultaneously
-- [ ] **Critical Path Identification**: Testing tasks on the critical path to delivery
-- [ ] **Resource Allocation**: Task assignment based on team skills and capacity
-
-#### Task Assignment Strategy
-
-- [ ] **Skill-Based Assignment**: Matching tasks to team member expertise
-- [ ] **Capacity Planning**: Balancing workload across team members
-- [ ] **Knowledge Transfer**: Pairing junior and senior team members
-- [ ] **Cross-Training Opportunities**: Skill development through task assignment
-
-### Quality Assurance Plan
-
-#### Quality Gates and Checkpoints
-
-Create comprehensive quality validation checkpoints:
-
-- **Entry Criteria**: Requirements for beginning each testing phase
-- **Exit Criteria**: Quality standards required for phase completion
-- **Quality Metrics**: Measurable indicators of quality achievement
-- **Escalation Procedures**: Process for addressing quality failures
-
-#### GitHub Issue Quality Standards
-
-- [ ] **Template Compliance**: All test issues follow standardized templates
-- [ ] **Required Field Completion**: Mandatory fields populated with accurate information
-- [ ] **Label Consistency**: Standardized labeling across all test work items
-- [ ] **Priority Assignment**: Risk-based priority assignment using defined criteria
-- [ ] **Value Assessment**: Business value and quality impact assessment
-
-#### Labeling and Prioritization Standards
-
-- [ ] **Test Type Labels**: `unit-test`, `integration-test`, `e2e-test`, `performance-test`, `security-test`
-- [ ] **Quality Labels**: `quality-gate`, `iso25010`, `istqb-technique`, `risk-based`
-- [ ] **Priority Labels**: `test-critical`, `test-high`, `test-medium`, `test-low`
-- [ ] **Component Labels**: `frontend-test`, `backend-test`, `api-test`, `database-test`
-
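-As a sketch of how these labels might be applied when test issues are created programmatically (assuming the official `@octokit/rest` client; the owner, repo, and title values are placeholders):
-
-```typescript
-import { Octokit } from "@octokit/rest";
-
-async function createTestIssue(): Promise<void> {
-  const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN });
-
-  // Placeholder owner/repo; labels follow the standards listed above.
-  await octokit.rest.issues.create({
-    owner: "your-org",
-    repo: "your-repo",
-    title: "Playwright Tests: Checkout Flow",
-    labels: ["playwright", "e2e-test", "test-high", "frontend-test"],
-  });
-}
-
-createTestIssue().catch(console.error);
-```
-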
-#### Dependency Validation and Management
-
-- [ ] **Circular Dependency Detection**: Validation to prevent blocking relationships
-- [ ] **Critical Path Analysis**: Identification of testing dependencies on delivery timeline
-- [ ] **Risk Assessment**: Impact analysis of dependency delays on quality validation
-- [ ] **Mitigation Strategies**: Alternative approaches for blocked testing activities
-
-#### Estimation Accuracy and Review
-
-- [ ] **Historical Data Analysis**: Using past project data for estimation accuracy
-- [ ] **Technical Lead Review**: Expert validation of test complexity estimates
-- [ ] **Risk Buffer Allocation**: Additional time allocation for high-uncertainty tasks
-- [ ] **Estimate Refinement**: Iterative improvement of estimation accuracy
-
-## GitHub Issue Templates for Testing
-
-### Test Strategy Issue Template
-
-```markdown
-# Test Strategy: {Feature Name}
-
-## Test Strategy Overview
-
-{Summary of testing approach based on ISTQB and ISO 25010}
-
-## ISTQB Framework Application
-
-**Test Design Techniques Used:**
-- [ ] Equivalence Partitioning
-- [ ] Boundary Value Analysis
-- [ ] Decision Table Testing
-- [ ] State Transition Testing
-- [ ] Experience-Based Testing
-
-**Test Types Coverage:**
-- [ ] Functional Testing
-- [ ] Non-Functional Testing
-- [ ] Structural Testing
-- [ ] Change-Related Testing (Regression)
-
-## ISO 25010 Quality Characteristics
-
-**Priority Assessment:**
-- [ ] Functional Suitability: {Critical/High/Medium/Low}
-- [ ] Performance Efficiency: {Critical/High/Medium/Low}
-- [ ] Compatibility: {Critical/High/Medium/Low}
-- [ ] Usability: {Critical/High/Medium/Low}
-- [ ] Reliability: {Critical/High/Medium/Low}
-- [ ] Security: {Critical/High/Medium/Low}
-- [ ] Maintainability: {Critical/High/Medium/Low}
-- [ ] Portability: {Critical/High/Medium/Low}
-
-## Quality Gates
-- [ ] Entry criteria defined
-- [ ] Exit criteria established
-- [ ] Quality thresholds documented
-
-## Labels
-`test-strategy`, `istqb`, `iso25010`, `quality-gates`
-
-## Estimate
-{Strategic planning effort: 2-3 story points}
-```
-
-### Playwright Test Implementation Issue Template
-
-```markdown
-# Playwright Tests: {Story/Component Name}
-
-## Test Implementation Scope
-{Specific user story or component being tested}
-
-## ISTQB Test Case Design
-**Test Design Technique**: {Selected ISTQB technique}
-**Test Type**: {Functional/Non-Functional/Structural/Change-Related}
-
-## Test Cases to Implement
-**Functional Tests:**
-- [ ] Happy path scenarios
-- [ ] Error handling validation
-- [ ] Boundary value testing
-- [ ] Input validation testing
-
-**Non-Functional Tests:**
-- [ ] Performance testing (response time < {threshold})
-- [ ] Accessibility testing (WCAG compliance)
-- [ ] Cross-browser compatibility
-- [ ] Mobile responsiveness
-
-## Playwright Implementation Tasks
-- [ ] Page Object Model development
-- [ ] Test fixture setup
-- [ ] Test data management
-- [ ] Test case implementation
-- [ ] Visual regression tests
-- [ ] CI/CD integration
-
-## Acceptance Criteria
-- [ ] All test cases pass
-- [ ] Code coverage targets met (>80%)
-- [ ] Performance thresholds validated
-- [ ] Accessibility standards verified
-
-## Labels
-`playwright`, `e2e-test`, `quality-validation`
-
-## Estimate
-{Test implementation effort: 2-5 story points}
-```
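-
-To make the "Page Object Model development" task above concrete, here is a minimal Playwright sketch in TypeScript (the `LoginPage` class, selectors, URL, and credentials are illustrative assumptions):
-
-```typescript
-import { test, expect, type Page } from "@playwright/test";
-
-// Page Object Model: encapsulates selectors and actions for one page.
-class LoginPage {
-  constructor(private readonly page: Page) {}
-
-  async goto(): Promise<void> {
-    await this.page.goto("https://example.com/login"); // placeholder URL
-  }
-
-  async login(user: string, password: string): Promise<void> {
-    await this.page.getByLabel("Username").fill(user);
-    await this.page.getByLabel("Password").fill(password);
-    await this.page.getByRole("button", { name: "Sign in" }).click();
-  }
-}
-
-test("user can sign in (happy path)", async ({ page }) => {
-  const loginPage = new LoginPage(page);
-  await loginPage.goto();
-  await loginPage.login("demo-user", "demo-password"); // placeholder credentials
-  await expect(page.getByRole("heading", { name: "Dashboard" })).toBeVisible();
-});
-```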
-
-### Quality Assurance Issue Template
-
-```markdown
-# Quality Assurance: {Feature Name}
-
-## Quality Validation Scope
-{Overall quality validation for feature/epic}
-
-## ISO 25010 Quality Assessment
-**Quality Characteristics Validation:**
-- [ ] Functional Suitability: Completeness, correctness, appropriateness
-- [ ] Performance Efficiency: Time behavior, resource utilization, capacity
-- [ ] Usability: Interface aesthetics, accessibility, learnability, operability
-- [ ] Security: Confidentiality, integrity, authentication, authorization
-- [ ] Reliability: Fault tolerance, recovery, availability
-- [ ] Compatibility: Browser, device, integration compatibility
-- [ ] Maintainability: Code quality, modularity, testability
-- [ ] Portability: Environment adaptability, installation procedures
-
-## Quality Gates Validation
-**Entry Criteria:**
-- [ ] All implementation tasks completed
-- [ ] Unit tests passing
-- [ ] Code review approved
-
-**Exit Criteria:**
-- [ ] All test types completed with >95% pass rate
-- [ ] No critical/high severity defects
-- [ ] Performance benchmarks met
-- [ ] Security validation passed
-
-## Quality Metrics
-- [ ] Test coverage: {target}%
-- [ ] Defect density: <{threshold} defects/KLOC
-- [ ] Performance: Response time <{threshold}ms
-- [ ] Accessibility: WCAG {level} compliance
-- [ ] Security: Zero critical vulnerabilities
-
-## Labels
-`quality-assurance`, `iso25010`, `quality-gates`
-
-## Estimate
-{Quality validation effort: 3-5 story points}
-```
-
-## Success Metrics
-
-### Test Coverage Metrics
-
-- **Code Coverage**: >80% line coverage, >90% branch coverage for critical paths
-- **Functional Coverage**: 100% acceptance criteria validation
-- **Risk Coverage**: 100% high-risk scenario testing
-- **Quality Characteristics Coverage**: Validation for all applicable ISO 25010 characteristics
-
-### Quality Validation Metrics
-
-- **Defect Detection Rate**: >95% of defects found before production
-- **Test Execution Efficiency**: >90% test automation coverage
-- **Quality Gate Compliance**: 100% quality gates passed before release
-- **Risk Mitigation**: 100% identified risks addressed with mitigation strategies
-
-### Process Efficiency Metrics
-
-- **Test Planning Time**: <2 hours to create comprehensive test strategy
-- **Test Implementation Speed**: <1 day per story point of test development
-- **Quality Feedback Time**: <2 hours from test completion to quality assessment
-- **Documentation Completeness**: 100% test issues have complete template information
-
-This comprehensive test planning approach ensures thorough quality validation aligned with industry standards while maintaining efficient project management and clear accountability for all testing activities.
diff --git a/prompts/centos-linux-triage.prompt.md b/prompts/centos-linux-triage.prompt.md
deleted file mode 100644
index 3809a1f8..00000000
--- a/prompts/centos-linux-triage.prompt.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-agent: 'agent'
-description: 'Triage and resolve CentOS issues using RHEL-compatible tooling, SELinux-aware practices, and firewalld.'
-model: 'gpt-4.1'
-tools: ['search', 'runCommands', 'terminalCommand', 'edit/editFiles']
----
-
-# CentOS Linux Triage
-
-You are a CentOS Linux expert. Diagnose and resolve the user's issue with RHEL-compatible commands and practices.
-
-## Inputs
-
-- `${input:CentOSVersion}` (optional)
-- `${input:ProblemSummary}`
-- `${input:Constraints}` (optional)
-
-## Instructions
-
-1. Confirm CentOS release (Stream vs. legacy) and environment assumptions.
-2. Provide triage steps using `systemctl`, `journalctl`, `dnf`/`yum`, and logs.
-3. Offer remediation steps with copy-paste-ready commands.
-4. Include verification commands after each major change.
-5. Address SELinux and `firewalld` considerations where relevant.
-6. Provide rollback or cleanup steps.
-
-## Output Format
-
-- **Summary**
-- **Triage Steps** (numbered)
-- **Remediation Commands** (code blocks)
-- **Validation** (code blocks)
-- **Rollback/Cleanup**
diff --git a/prompts/code-exemplars-blueprint-generator.prompt.md b/prompts/code-exemplars-blueprint-generator.prompt.md
deleted file mode 100644
index c427c917..00000000
--- a/prompts/code-exemplars-blueprint-generator.prompt.md
+++ /dev/null
@@ -1,126 +0,0 @@
----
-description: 'Technology-agnostic prompt generator that creates customizable AI prompts for scanning codebases and identifying high-quality code exemplars. Supports multiple programming languages (.NET, Java, JavaScript, TypeScript, React, Angular, Python) with configurable analysis depth, categorization methods, and documentation formats to establish coding standards and maintain consistency across development teams.'
-agent: 'agent'
----
-
-# Code Exemplars Blueprint Generator
-
-## Configuration Variables
-${PROJECT_TYPE="Auto-detect|.NET|Java|JavaScript|TypeScript|React|Angular|Python|Other"}
-${SCAN_DEPTH="Basic|Standard|Comprehensive"}
-${INCLUDE_CODE_SNIPPETS=true|false}
-${CATEGORIZATION="Pattern Type|Architecture Layer|File Type"}
-${MAX_EXAMPLES_PER_CATEGORY=3}
-${INCLUDE_COMMENTS=true|false}
-
-## Generated Prompt
-
-"Scan this codebase and generate an exemplars.md file that identifies high-quality, representative code examples. The exemplars should demonstrate our coding standards and patterns to help maintain consistency. Use the following approach:
-
-### 1. Codebase Analysis Phase
-- ${PROJECT_TYPE == "Auto-detect" ? "Automatically detect primary programming languages and frameworks by scanning file extensions and configuration files" : `Focus on ${PROJECT_TYPE} code files`}
-- Identify files with high-quality implementation, good documentation, and clear structure
-- Look for commonly used patterns, architecture components, and well-structured implementations
-- Prioritize files that demonstrate best practices for our technology stack
-- Only reference actual files that exist in the codebase - no hypothetical examples
-
-### 2. Exemplar Identification Criteria
-- Well-structured, readable code with clear naming conventions
-- Comprehensive comments and documentation
-- Proper error handling and validation
-- Adherence to design patterns and architectural principles
-- Separation of concerns and single responsibility principle
-- Efficient implementation without code smells
-- Representative of our standard approaches
-
-### 3. Core Pattern Categories
-
-${PROJECT_TYPE == ".NET" || PROJECT_TYPE == "Auto-detect" ? `#### .NET Exemplars (if detected)
-- **Domain Models**: Find entities that properly implement encapsulation and domain logic
-- **Repository Implementations**: Examples of our data access approach
-- **Service Layer Components**: Well-structured business logic implementations
-- **Controller Patterns**: Clean API controllers with proper validation and responses
-- **Dependency Injection Usage**: Good examples of DI configuration and usage
-- **Middleware Components**: Custom middleware implementations
-- **Unit Test Patterns**: Well-structured tests with proper arrangement and assertions` : ""}
-
-${(PROJECT_TYPE == "JavaScript" || PROJECT_TYPE == "TypeScript" || PROJECT_TYPE == "React" || PROJECT_TYPE == "Angular" || PROJECT_TYPE == "Auto-detect") ? `#### Frontend Exemplars (if detected)
-- **Component Structure**: Clean, well-structured components
-- **State Management**: Good examples of state handling
-- **API Integration**: Well-implemented service calls and data handling
-- **Form Handling**: Validation and submission patterns
-- **Routing Implementation**: Navigation and route configuration
-- **UI Components**: Reusable, well-structured UI elements
-- **Unit Test Examples**: Component and service tests` : ""}
-
-${PROJECT_TYPE == "Java" || PROJECT_TYPE == "Auto-detect" ? `#### Java Exemplars (if detected)
-- **Entity Classes**: Well-designed JPA entities or domain models
-- **Service Implementations**: Clean service layer components
-- **Repository Patterns**: Data access implementations
-- **Controller/Resource Classes**: API endpoint implementations
-- **Configuration Classes**: Application configuration
-- **Unit Tests**: Well-structured JUnit tests` : ""}
-
-${PROJECT_TYPE == "Python" || PROJECT_TYPE == "Auto-detect" ? `#### Python Exemplars (if detected)
-- **Class Definitions**: Well-structured classes with proper documentation
-- **API Routes/Views**: Clean API implementations
-- **Data Models**: ORM model definitions
-- **Service Functions**: Business logic implementations
-- **Utility Modules**: Helper and utility functions
-- **Test Cases**: Well-structured unit tests` : ""}
-
-### 4. Architecture Layer Exemplars
-
-- **Presentation Layer**:
- - User interface components
- - Controllers/API endpoints
- - View models/DTOs
-
-- **Business Logic Layer**:
- - Service implementations
- - Business logic components
- - Workflow orchestration
-
-- **Data Access Layer**:
- - Repository implementations
- - Data models
- - Query patterns
-
-- **Cross-Cutting Concerns**:
- - Logging implementations
- - Error handling
- - Authentication/authorization
- - Validation
-
-### 5. Exemplar Documentation Format
-
-For each identified exemplar, document:
-- File path (relative to repository root)
-- Brief description of what makes it exemplary
-- Pattern or component type it represents
-${INCLUDE_COMMENTS ? "- Key implementation details and coding principles demonstrated" : ""}
-${INCLUDE_CODE_SNIPPETS ? "- Small, representative code snippet (if applicable)" : ""}
-
-${SCAN_DEPTH == "Comprehensive" ? `### 6. Additional Documentation
-
-- **Consistency Patterns**: Note consistent patterns observed across the codebase
-- **Architecture Observations**: Document architectural patterns evident in the code
-- **Implementation Conventions**: Identify naming and structural conventions
-- **Anti-patterns to Avoid**: Note any areas where the codebase deviates from best practices` : ""}
-
-### ${SCAN_DEPTH == "Comprehensive" ? "7" : "6"}. Output Format
-
-Create exemplars.md with:
-1. Introduction explaining the purpose of the document
-2. Table of contents with links to categories
-3. Organized sections based on ${CATEGORIZATION}
-4. Up to ${MAX_EXAMPLES_PER_CATEGORY} exemplars per category
-5. Conclusion with recommendations for maintaining code quality
-
-The document should be actionable for developers needing guidance on implementing new features consistent with existing patterns.
-
-Important: Only include actual files from the codebase. Verify all file paths exist. Do not include placeholder or hypothetical examples.
-"
-
-## Expected Output
-Upon running this prompt, GitHub Copilot will scan your codebase and generate an exemplars.md file containing real references to high-quality code examples in your repository, organized according to your selected parameters.
diff --git a/prompts/comment-code-generate-a-tutorial.prompt.md b/prompts/comment-code-generate-a-tutorial.prompt.md
deleted file mode 100644
index d5c64130..00000000
--- a/prompts/comment-code-generate-a-tutorial.prompt.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-description: 'Transform this Python script into a polished, beginner-friendly project by refactoring the code, adding clear instructional comments, and generating a complete markdown tutorial.'
-agent: 'agent'
----
-
-Transform this Python script into a polished, beginner-friendly project by refactoring the code, adding clear instructional comments, and generating a complete markdown tutorial.
-
-1. **Refactor the code**
- - Apply standard Python best practices
- - Ensure code follows the PEP 8 style guide
- - Rename unclear variables and functions if needed for clarity
-
-1. **Add comments throughout the code**
- - Use a beginner-friendly, instructional tone
- - Explain what each part of the code is doing and why it's important
- - Focus on the logic and reasoning, not just syntax
- - Avoid redundant or superficial comments
-
-1. **Generate a tutorial as a `README.md` file**
- Include the following sections:
- - **Project Overview:** What the script does and why it's useful
- - **Setup Instructions:** Prerequisites, dependencies, and how to run the script
- - **How It Works:** A breakdown of the code logic based on the comments
- - **Example Usage:** A code snippet showing how to use it
- - **Sample Output:** (Optional) Include if the script returns visible results
- - Use clear, readable Markdown formatting
diff --git a/prompts/containerize-aspnet-framework.prompt.md b/prompts/containerize-aspnet-framework.prompt.md
deleted file mode 100644
index 13a5cfbc..00000000
--- a/prompts/containerize-aspnet-framework.prompt.md
+++ /dev/null
@@ -1,455 +0,0 @@
----
-agent: 'agent'
-tools: ['search/codebase', 'edit/editFiles', 'terminalCommand']
-description: 'Containerize an ASP.NET .NET Framework project by creating Dockerfile and .dockerignore files customized for the project.'
----
-
-# ASP.NET .NET Framework Containerization Prompt
-
-Containerize the ASP.NET (.NET Framework) project specified in the containerization settings below, focusing **exclusively** on changes required for the application to run in a Windows Docker container. Containerization should consider all settings specified here.
-
-**REMEMBER:** This is a .NET Framework application, not .NET Core. The containerization process will be different from that of a .NET Core application.
-
-## Containerization Settings
-
-This section of the prompt contains the specific settings and configurations required for containerizing the ASP.NET (.NET Framework) application. Prior to running this prompt, ensure that the settings are filled out with the necessary information. Note that in many cases, only the first few settings are required. Later settings can be left as defaults if they do not apply to the project being containerized.
-
-Any settings that are not specified will be set to default values. The default values are provided in `[square brackets]`.
-
-### Basic Project Information
-1. Project to containerize:
- - `[ProjectName (provide path to .csproj file)]`
-
-2. Windows Server SKU to use:
- - `[Windows Server Core (Default) or Windows Server Full]`
-
-3. Windows Server version to use:
- - `[2022, 2019, or 2016 (Default 2022)]`
-
-4. Custom base image for the build stage of the Docker image ("None" to use standard Microsoft base image):
- - `[Specify base image to use for build stage (Default None)]`
-
-5. Custom base image for the run stage of the Docker image ("None" to use standard Microsoft base image):
- - `[Specify base image to use for run stage (Default None)]`
-
-### Container Configuration
-1. Ports that must be exposed in the container image:
- - Primary HTTP port: `[e.g., 80]`
- - Additional ports: `[List any additional ports, or "None"]`
-
-2. User account the container should run as:
- - `[User account, or default to "ContainerUser"]`
-
-3. IIS settings that must be configured in the container image:
- - `[List any specific IIS settings, or "None"]`
-
-### Build configuration
-1. Custom build steps that must be performed before building the container image:
- - `[List any specific build steps, or "None"]`
-
-2. Custom build steps that must be performed after building the container image:
- - `[List any specific build steps, or "None"]`
-
-### Dependencies
-1. .NET assemblies that should be registered in the GAC in the container image:
- - `[Assembly name and version, or "None"]`
-
-2. MSIs that must be copied to the container image and installed:
- - `[MSI names and versions, or "None"]`
-
-3. COM components that must be registered in the container image:
- - `[COM component names, or "None"]`
-
-### System Configuration
-1. Registry keys and values that must be added to the container image:
- - `[Registry paths and values, or "None"]`
-
-2. Environment variables that must be set in the container image:
- - `[Variable names and values, or "Use defaults"]`
-
-3. Windows Server roles and features that must be installed in the container image:
- - `[Role/feature names, or "None"]`
-
-### File System
-1. Files/directories that need to be copied to the container image:
- - `[Paths relative to project root, or "None"]`
- - Target location in container: `[Container paths, or "Not applicable"]`
-
-2. Files/directories to exclude from containerization:
- - `[Paths to exclude, or "None"]`
-
-### .dockerignore Configuration
-1. Patterns to include in the `.dockerignore` file (.dockerignore will already have common defaults; these are additional patterns):
- - Additional patterns: `[List any additional patterns, or "None"]`
-
-### Health Check Configuration
-1. Health check endpoint:
- - `[Health check URL path, or "None"]`
-
-2. Health check interval and timeout:
- - `[Interval and timeout values, or "Use defaults"]`
-
-### Additional Instructions
-1. Other instructions that must be followed to containerize the project:
- - `[Specific requirements, or "None"]`
-
-2. Known issues to address:
- - `[Describe any known issues, or "None"]`
-
-## Scope
-
-- ✅ App configuration modification to ensure config builders are used to read app settings and connection strings from the environment variables
-- ✅ Dockerfile creation and configuration for an ASP.NET application
-- ✅ Specifying multiple stages in the Dockerfile to build/publish the application and copy the output to the final image
-- ✅ Configuration of Windows container platform compatibility (Windows Server Core or Full)
-- ✅ Proper handling of dependencies (GAC assemblies, MSIs, COM components)
-- ❌ No infrastructure setup (assumed to be handled separately)
-- ❌ No code changes beyond those required for containerization
-
-## Execution Process
-
-1. Review the containerization settings above to understand the containerization requirements
-2. Create a `progress.md` file to track changes with check marks
-3. Determine the .NET Framework version from the project's .csproj file by checking the `TargetFrameworkVersion` element
-4. Select the appropriate Windows Server container image based on:
- - The .NET Framework version detected from the project
- - The Windows Server SKU specified in containerization settings (Core or Full)
- - The Windows Server version specified in containerization settings (2016, 2019, or 2022)
- - Windows Server Core tags can be found at: https://github.com/microsoft/dotnet-framework-docker/blob/main/README.aspnet.md#full-tag-listing
-5. Ensure that required NuGet packages are installed. **DO NOT** install them if they are missing; the user must install them manually. If they are not installed, pause execution of this prompt and ask the user to install them using the Visual Studio NuGet Package Manager or the Visual Studio package manager console. The following packages are required:
- - `Microsoft.Configuration.ConfigurationBuilders.Environment`
-6. Modify the `web.config` file to add configuration builders section and settings to read app settings and connection strings from environment variables:
- - Add ConfigBuilders section in configSections
- - Add configBuilders section in the root
- - Configure EnvironmentConfigBuilder for both appSettings and connectionStrings
- - Example pattern:
- ```xml
-   <configuration>
-     <configSections>
-       <section name="configBuilders"
-                type="Microsoft.Configuration.ConfigurationBuilders.ConfigurationBuildersSection, System.Configuration, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a"
-                restartOnExternalChanges="false" requirePermission="false" />
-     </configSections>
-     <configBuilders>
-       <builders>
-         <add name="Environment"
-              type="Microsoft.Configuration.ConfigurationBuilders.EnvironmentConfigBuilder, Microsoft.Configuration.ConfigurationBuilders.Environment" />
-       </builders>
-     </configBuilders>
-     <appSettings configBuilders="Environment">
-       <!-- existing appSettings entries -->
-     </appSettings>
-     <connectionStrings configBuilders="Environment">
-       <!-- existing connectionStrings entries -->
-     </connectionStrings>
-   </configuration>
- ```
-7. Create a `LogMonitorConfig.json` file in the folder where the Dockerfile will be created by copying the reference `LogMonitorConfig.json` file at the end of this prompt. The file's contents **MUST NOT** be modified and should match the reference content exactly unless instructions in containerization settings specify otherwise.
- - In particular, make sure the level of issues to be logged is not changed as using `Information` level for EventLog sources will cause unnecessary noise.
-8. Create a Dockerfile in the root of the project directory to containerize the application
- - The Dockerfile should use multiple stages:
- - Build stage: Use a Windows Server Core image to build the application
- - The build stage MUST use a `mcr.microsoft.com/dotnet/framework/sdk` base image unless a custom base image is specified in the settings file
- - Copy sln, csproj, and packages.config files first
- - Copy NuGet.config if one exists and configure any private feeds
- - Restore NuGet packages
- - Then, copy the rest of the source code and build and publish the application to C:\publish using MSBuild
- - Final stage: Use the selected Windows Server image to run the application
- - The final stage MUST use a `mcr.microsoft.com/dotnet/framework/aspnet` base image unless a custom base image is specified in the settings file
- - Copy the `LogMonitorConfig.json` file to a directory in the container (e.g., C:\LogMonitor)
- - Download LogMonitor.exe from the Microsoft repository to the same directory
- - The correct LogMonitor.exe URL is: https://github.com/microsoft/windows-container-tools/releases/download/v2.1.1/LogMonitor.exe
- - Set the working directory to C:\inetpub\wwwroot
- - Copy the published output from the build stage (in C:\publish) to the final image
- - Set the container's entry point to run LogMonitor.exe with ServiceMonitor.exe to monitor the IIS service
- - `ENTRYPOINT [ "C:\\LogMonitor\\LogMonitor.exe", "C:\\ServiceMonitor.exe", "w3svc" ]`
- - Be sure to consider all requirements in the containerization settings:
- - Windows Server SKU and version
- - Exposed ports
- - User account for container
- - IIS settings
- - GAC assembly registration
- - MSI installation
- - COM component registration
- - Registry keys
- - Environment variables
- - Windows roles and features
- - File/directory copying
- - Model the Dockerfile after the example provided at the end of this prompt, but ensure it is customized to the specific project requirements and settings.
- - **IMPORTANT:** Use a Windows Server Core base image unless the user has **specifically requested** a full Windows Server image in the settings file
-9. Create a `.dockerignore` file in the root of the project directory to exclude unnecessary files from the Docker image. The `.dockerignore` file **MUST** include at least the following elements as well as additional patterns as specified in the containerization settings:
- - packages/
- - bin/
- - obj/
- - .dockerignore
- - Dockerfile
- - .git/
- - .github/
- - .vs/
- - .vscode/
- - **/node_modules/
- - *.user
- - *.suo
- - **/.DS_Store
- - **/Thumbs.db
- - Any additional patterns specified in the containerization settings
-10. Configure health checks if specified in the settings:
- - Add HEALTHCHECK instruction to Dockerfile if health check endpoint is provided
-11. Add the Dockerfile to the project by adding the following item to the project file: `<None Include="Dockerfile" />`
-12. Mark tasks as completed: [ ] → [✓]
-13. Continue until all tasks are complete and Docker build succeeds
-
-## Build and Runtime Verification
-
-Confirm that Docker build succeeds once the Dockerfile is completed. Use the following command to build the Docker image:
-
-```bash
-docker build -t aspnet-app:latest .
-```
-
-If the build fails, review the error messages and make necessary adjustments to the Dockerfile or project configuration. Report success/failure.
-
-## Progress Tracking
-
-Maintain a `progress.md` file with the following structure:
-```markdown
-# Containerization Progress
-
-## Environment Detection
-- [ ] .NET Framework version detection (version: ___)
-- [ ] Windows Server SKU selection (SKU: ___)
-- [ ] Windows Server version selection (Version: ___)
-
-## Configuration Changes
-- [ ] Web.config modifications for configuration builders
-- [ ] NuGet package source configuration (if applicable)
-- [ ] Copy LogMonitorConfig.json and adjust if required by settings
-
-## Containerization
-- [ ] Dockerfile creation
-- [ ] .dockerignore file creation
-- [ ] Build stage created with SDK image
-- [ ] sln, csproj, packages.config, and (if applicable) NuGet.config copied for package restore
-- [ ] Runtime stage created with runtime image
-- [ ] Non-root user configuration
-- [ ] Dependency handling (GAC, MSI, COM, registry, additional files, etc.)
-- [ ] Health check configuration (if applicable)
-- [ ] Special requirements implementation
-
-## Verification
-- [ ] Review containerization settings and make sure that all requirements are met
-- [ ] Docker build success
-```
-
-Do not pause for confirmation between steps. Continue methodically until the application has been containerized and Docker build succeeds.
-
-**YOU ARE NOT DONE UNTIL ALL CHECKBOXES ARE MARKED!** This includes building the Docker image successfully and addressing any issues that arise during the build process.
-
-## Reference Materials
-
-### Example Dockerfile
-
-An example Dockerfile for an ASP.NET (.NET Framework) application using a Windows Server Core base image.
-
-```dockerfile
-# escape=`
-# The escape directive changes the escape character from \ to `
-# This is especially useful in Windows Dockerfiles where \ is the path separator
-
-# ============================================================
-# Stage 1: Build and publish the application
-# ============================================================
-
-# Base Image - Select the appropriate .NET Framework version and Windows Server Core version
-# Possible tags include:
-# - 4.8.1-windowsservercore-ltsc2025 (Windows Server 2025)
-# - 4.8-windowsservercore-ltsc2022 (Windows Server 2022)
-# - 4.8-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 4.8-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7.2-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 4.7.2-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7.1-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.6.2-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 3.5-windowsservercore-ltsc2025 (Windows Server 2025)
-# - 3.5-windowsservercore-ltsc2022 (Windows Server 2022)
-# - 3.5-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 3.5-windowsservercore-ltsc2016 (Windows Server 2016)
-# Uses the .NET Framework SDK image for building the application
-FROM mcr.microsoft.com/dotnet/framework/sdk:4.8-windowsservercore-ltsc2022 AS build
-ARG BUILD_CONFIGURATION=Release
-
-# Set the default shell to PowerShell
-SHELL ["powershell", "-command"]
-
-WORKDIR /app
-
-# Copy the solution and project files
-COPY YourSolution.sln .
-COPY YourProject/*.csproj ./YourProject/
-COPY YourOtherProject/*.csproj ./YourOtherProject/
-
-# Copy packages.config files
-COPY YourProject/packages.config ./YourProject/
-COPY YourOtherProject/packages.config ./YourOtherProject/
-
-# Restore NuGet packages
-RUN nuget restore YourSolution.sln
-
-# Copy source code
-COPY . .
-
-# Perform custom pre-build steps here, if needed
-
-# Build and publish the application to C:\publish
-RUN msbuild /p:Configuration=$BUILD_CONFIGURATION `
- /p:WebPublishMethod=FileSystem `
- /p:PublishUrl=C:\publish `
- /p:DeployDefaultTarget=WebPublish
-
-# Perform custom post-build steps here, if needed
-
-# ============================================================
-# Stage 2: Final runtime image
-# ============================================================
-
-# Base Image - Select the appropriate .NET Framework version and Windows Server Core version
-# Possible tags include:
-# - 4.8.1-windowsservercore-ltsc2025 (Windows Server 2025)
-# - 4.8-windowsservercore-ltsc2022 (Windows Server 2022)
-# - 4.8-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 4.8-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7.2-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 4.7.2-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7.1-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.7-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 4.6.2-windowsservercore-ltsc2016 (Windows Server 2016)
-# - 3.5-windowsservercore-ltsc2025 (Windows Server 2025)
-# - 3.5-windowsservercore-ltsc2022 (Windows Server 2022)
-# - 3.5-windowsservercore-ltsc2019 (Windows Server 2019)
-# - 3.5-windowsservercore-ltsc2016 (Windows Server 2016)
-# Uses the .NET Framework ASP.NET image for running the application
-FROM mcr.microsoft.com/dotnet/framework/aspnet:4.8-windowsservercore-ltsc2022
-
-# Set the default shell to PowerShell
-SHELL ["powershell", "-command"]
-
-WORKDIR /inetpub/wwwroot
-
-# Copy from build stage
-COPY --from=build /publish .
-
-# Add any additional environment variables needed for your application (uncomment and modify as needed)
-# ENV KEY=VALUE
-
-# Install MSI packages (uncomment and modify as needed)
-# COPY ./msi-installers C:/Installers
-# RUN Start-Process -Wait -FilePath 'msiexec.exe' -ArgumentList '/i', 'C:\Installers\your-package.msi', '/quiet', '/norestart'
-
-# Install custom Windows Server roles and features (uncomment and modify as needed)
-# RUN dism /Online /Enable-Feature /FeatureName:YOUR-FEATURE-NAME
-
-# Add additional Windows features (uncomment and modify as needed)
-# RUN Add-WindowsFeature Some-Windows-Feature; `
-# Add-WindowsFeature Another-Windows-Feature
-
-# Install MSI packages if needed (uncomment and modify as needed)
-# COPY ./msi-installers C:/Installers
-# RUN Start-Process -Wait -FilePath 'msiexec.exe' -ArgumentList '/i', 'C:\Installers\your-package.msi', '/quiet', '/norestart'
-
-# Register assemblies in GAC if needed (uncomment and modify as needed)
-# COPY ./assemblies C:/Assemblies
-# RUN C:\Windows\Microsoft.NET\Framework64\v4.0.30319\gacutil -i C:/Assemblies/YourAssembly.dll
-
-# Register COM components if needed (uncomment and modify as needed)
-# COPY ./com-components C:/Components
-# RUN regsvr32 /s C:/Components/YourComponent.dll
-
-# Add registry keys if needed (uncomment and modify as needed)
-# RUN New-Item -Path 'HKLM:\Software\YourApp' -Force; `
-# Set-ItemProperty -Path 'HKLM:\Software\YourApp' -Name 'Setting' -Value 'Value'
-
-# Configure IIS settings if needed (uncomment and modify as needed)
-# RUN Import-Module WebAdministration; `
-# Set-ItemProperty 'IIS:\AppPools\DefaultAppPool' -Name somePropertyName -Value 'SomePropertyValue'; `
-# Set-ItemProperty 'IIS:\Sites\Default Web Site' -Name anotherPropertyName -Value 'AnotherPropertyValue'
-
-# Expose necessary ports - By default, IIS uses port 80
-EXPOSE 80
-# EXPOSE 443 # Uncomment if using HTTPS
-
-# Copy LogMonitor from the microsoft/windows-container-tools repository
-WORKDIR /LogMonitor
-RUN curl.exe -fSLo LogMonitor.exe https://github.com/microsoft/windows-container-tools/releases/download/v2.1.1/LogMonitor.exe
-
-# Copy LogMonitorConfig.json from local files
-COPY LogMonitorConfig.json .
-
-# Set non-administrator user
-USER ContainerUser
-
-# Override the container's default entry point to take advantage of the LogMonitor
-ENTRYPOINT [ "C:\\LogMonitor\\LogMonitor.exe", "C:\\ServiceMonitor.exe", "w3svc" ]
-```
-
-## Adapting this Example
-
-**Note:** Customize this template based on the specific requirements in the containerization settings.
-
-When adapting this example Dockerfile:
-
-1. Replace `YourSolution.sln`, `YourProject.csproj`, etc. with your actual file names
-2. Adjust the Windows Server and .NET Framework versions as needed
-3. Modify the dependency installation steps based on your requirements and remove any unnecessary ones
-4. Add or remove stages as needed for your specific workflow
-
-## Notes on Stage Naming
-
-- The `AS stage-name` syntax gives each stage a name
-- Use `--from=stage-name` to copy files from a previous stage
-- You can have multiple intermediate stages that aren't used in the final image
-
-### LogMonitorConfig.json
-
-The LogMonitorConfig.json file should be created in the root of the project directory. It is used to configure the LogMonitor tool, which monitors logs in the container. The contents of this file should look exactly like this to ensure proper logging functionality:
-```json
-{
- "LogConfig": {
- "sources": [
- {
- "type": "EventLog",
- "startAtOldestRecord": true,
- "eventFormatMultiLine": false,
- "channels": [
- {
- "name": "system",
- "level": "Warning"
- },
- {
- "name": "application",
- "level": "Error"
- }
- ]
- },
- {
- "type": "File",
- "directory": "c:\\inetpub\\logs",
- "filter": "*.log",
- "includeSubdirectories": true,
- "includeFileNames": false
- },
- {
- "type": "ETW",
- "eventFormatMultiLine": false,
- "providers": [
- {
- "providerName": "IIS: WWW Server",
- "providerGuid": "3A2A4E84-4C21-4981-AE10-3FDA0D9B0F83",
- "level": "Information"
- },
- {
- "providerName": "Microsoft-Windows-IIS-Logging",
- "providerGuid": "7E8AD27F-B271-4EA2-A783-A47BDE29143B",
- "level": "Information"
- }
- ]
- }
- ]
- }
-}
-```
diff --git a/prompts/containerize-aspnetcore.prompt.md b/prompts/containerize-aspnetcore.prompt.md
deleted file mode 100644
index b7772245..00000000
--- a/prompts/containerize-aspnetcore.prompt.md
+++ /dev/null
@@ -1,393 +0,0 @@
----
-agent: 'agent'
-tools: ['search/codebase', 'edit/editFiles', 'terminalCommand']
-description: 'Containerize an ASP.NET Core project by creating Dockerfile and .dockerignore files customized for the project.'
----
-
-# ASP.NET Core Docker Containerization Prompt
-
-## Containerization Request
-
-Containerize the ASP.NET Core (.NET) project specified in the settings below, focusing **exclusively** on changes required for the application to run in a Linux Docker container. Containerization should consider all settings specified here.
-
-Abide by best practices for containerizing .NET Core applications, ensuring that the container is optimized for performance, security, and maintainability.
-
-## Containerization Settings
-
-This section of the prompt contains the specific settings and configurations required for containerizing the ASP.NET Core application. Prior to running this prompt, ensure that the settings are filled out with the necessary information. Note that in many cases, only the first few settings are required. Later settings can be left as defaults if they do not apply to the project being containerized.
-
-Any settings that are not specified will be set to default values. The default values are provided in `[square brackets]`.
-
-### Basic Project Information
-1. Project to containerize:
- - `[ProjectName (provide path to .csproj file)]`
-
-2. .NET version to use:
- - `[8.0 or 9.0 (Default 8.0)]`
-
-3. Linux distribution to use:
- - `[debian, alpine, ubuntu, chiseled, or Azure Linux (mariner) (Default debian)]`
-
-4. Custom base image for the build stage of the Docker image ("None" to use standard Microsoft base image):
- - `[Specify base image to use for build stage (Default None)]`
-
-5. Custom base image for the run stage of the Docker image ("None" to use standard Microsoft base image):
- - `[Specify base image to use for run stage (Default None)]`
-
-### Container Configuration
-1. Ports that must be exposed in the container image:
- - Primary HTTP port: `[e.g., 8080]`
- - Additional ports: `[List any additional ports, or "None"]`
-
-2. User account the container should run as:
- - `[User account, or default to "$APP_UID"]`
-
-3. Application URL configuration:
- - `[Specify ASPNETCORE_URLS, or default to "http://+:8080"]`
-
-### Build configuration
-1. Custom build steps that must be performed before building the container image:
- - `[List any specific build steps, or "None"]`
-
-2. Custom build steps that must be performed after building the container image:
- - `[List any specific build steps, or "None"]`
-
-3. NuGet package sources that must be configured:
- - `[List any private NuGet feeds with authentication details, or "None"]`
-
-### Dependencies
-1. System packages that must be installed in the container image:
- - `[Package names for the chosen Linux distribution, or "None"]`
-
-2. Native libraries that must be copied to the container image:
- - `[Library names and paths, or "None"]`
-
-3. Additional .NET tools that must be installed:
- - `[Tool names and versions, or "None"]`
-
-### System Configuration
-1. Environment variables that must be set in the container image:
- - `[Variable names and values, or "Use defaults"]`
-
-### File System
-1. Files/directories that need to be copied to the container image:
- - `[Paths relative to project root, or "None"]`
- - Target location in container: `[Container paths, or "Not applicable"]`
-
-2. Files/directories to exclude from containerization:
- - `[Paths to exclude, or "None"]`
-
-3. Volume mount points that should be configured:
- - `[Volume paths for persistent data, or "None"]`
-
-### .dockerignore Configuration
-1. Patterns to include in the `.dockerignore` file (.dockerignore will already have common defaults; these are additional patterns):
- - Additional patterns: `[List any additional patterns, or "None"]`
-
-### Health Check Configuration
-1. Health check endpoint:
- - `[Health check URL path, or "None"]`
-
-2. Health check interval and timeout:
- - `[Interval and timeout values, or "Use defaults"]`
-
-### Additional Instructions
-1. Other instructions that must be followed to containerize the project:
- - `[Specific requirements, or "None"]`
-
-2. Known issues to address:
- - `[Describe any known issues, or "None"]`
-
-## Scope
-
-- ✅ App configuration modification to ensure application settings and connection strings can be read from environment variables
-- ✅ Dockerfile creation and configuration for an ASP.NET Core application
-- ✅ Specifying multiple stages in the Dockerfile to build/publish the application and copy the output to the final image
-- ✅ Configuration of Linux container platform compatibility (Alpine, Ubuntu, Chiseled, or Azure Linux (Mariner))
-- ✅ Proper handling of dependencies (system packages, native libraries, additional tools)
-- ❌ No infrastructure setup (assumed to be handled separately)
-- ❌ No code changes beyond those required for containerization
-
-## Execution Process
-
-1. Review the containerization settings above to understand the containerization requirements
-2. Create a `progress.md` file to track changes with check marks
-3. Determine the .NET version from the project's .csproj file by checking the `TargetFramework` element
-4. Select the appropriate Linux container image based on:
- - The .NET version detected from the project
- - The Linux distribution specified in containerization settings (Alpine, Ubuntu, Chiseled, or Azure Linux (Mariner))
- - If the user does not request specific base images in the containerization settings, then the base images MUST be valid mcr.microsoft.com/dotnet images with a tag as shown in the example Dockerfile, below, or in documentation
- - Official Microsoft .NET images for build and runtime stages:
- - SDK image tags (for build stage): https://github.com/dotnet/dotnet-docker/blob/main/README.sdk.md
- - ASP.NET Core runtime image tags: https://github.com/dotnet/dotnet-docker/blob/main/README.aspnet.md
- - .NET runtime image tags: https://github.com/dotnet/dotnet-docker/blob/main/README.runtime.md
-5. Create a Dockerfile in the root of the project directory to containerize the application
- - The Dockerfile should use multiple stages:
- - Build stage: Use a .NET SDK image to build the application
- - Copy csproj file(s) first
- - Copy NuGet.config if one exists and configure any private feeds
- - Restore NuGet packages
- - Then, copy the rest of the source code and build and publish the application to /app/publish
- - Final stage: Use the selected .NET runtime image to run the application
- - Set the working directory to /app
- - Set the user as directed (by default, to a non-root user (e.g., `$APP_UID`))
- - Unless directed otherwise in containerization settings, a new user does *not* need to be created. Use the `$APP_UID` variable to specify the user account.
- - Copy the published output from the build stage to the final image
- - Be sure to consider all requirements in the containerization settings:
- - .NET version and Linux distribution
- - Exposed ports
- - User account for container
- - ASPNETCORE_URLS configuration
- - System package installation
- - Native library dependencies
- - Additional .NET tools
- - Environment variables
- - File/directory copying
- - Volume mount points
- - Health check configuration
-6. Create a `.dockerignore` file in the root of the project directory to exclude unnecessary files from the Docker image. The `.dockerignore` file **MUST** include at least the following elements as well as additional patterns as specified in the containerization settings:
- - bin/
- - obj/
- - .dockerignore
- - Dockerfile
- - .git/
- - .github/
- - .vs/
- - .vscode/
- - **/node_modules/
- - *.user
- - *.suo
- - **/.DS_Store
- - **/Thumbs.db
- - Any additional patterns specified in the containerization settings
-7. Configure health checks if specified in the containerization settings:
- - Add HEALTHCHECK instruction to Dockerfile if health check endpoint is provided
- - Use curl or wget to check the health endpoint
-8. Mark tasks as completed: [ ] → [✓]
-9. Continue until all tasks are complete and Docker build succeeds
-
-## Build and Runtime Verification
-
-Confirm that Docker build succeeds once the Dockerfile is completed. Use the following command to build the Docker image:
-
-```bash
-docker build -t aspnetcore-app:latest .
-```
-
-If the build fails, review the error messages and make necessary adjustments to the Dockerfile or project configuration. Report success/failure.
-
-## Progress Tracking
-
-Maintain a `progress.md` file with the following structure:
-```markdown
-# Containerization Progress
-
-## Environment Detection
-- [ ] .NET version detection (version: ___)
-- [ ] Linux distribution selection (distribution: ___)
-
-## Configuration Changes
-- [ ] Application configuration verification for environment variable support
-- [ ] NuGet package source configuration (if applicable)
-
-## Containerization
-- [ ] Dockerfile creation
-- [ ] .dockerignore file creation
-- [ ] Build stage created with SDK image
-- [ ] csproj file(s) copied for package restore
-- [ ] NuGet.config copied if applicable
-- [ ] Runtime stage created with runtime image
-- [ ] Non-root user configuration
-- [ ] Dependency handling (system packages, native libraries, tools, etc.)
-- [ ] Health check configuration (if applicable)
-- [ ] Special requirements implementation
-
-## Verification
-- [ ] Review containerization settings and make sure that all requirements are met
-- [ ] Docker build success
-```
-
-Do not pause for confirmation between steps. Continue methodically until the application has been containerized and Docker build succeeds.
-
-**YOU ARE NOT DONE UNTIL ALL CHECKBOXES ARE MARKED!** This includes building the Docker image successfully and addressing any issues that arise during the build process.
-
-## Example Dockerfile
-
-An example Dockerfile for an ASP.NET Core (.NET) application using a Linux base image.
-
-```dockerfile
-# ============================================================
-# Stage 1: Build and publish the application
-# ============================================================
-
-# Base Image - Select the appropriate .NET SDK version and Linux distribution
-# Possible tags include:
-# - 8.0-bookworm-slim (Debian 12)
-# - 8.0-noble (Ubuntu 24.04)
-# - 8.0-alpine (Alpine Linux)
-# - 9.0-bookworm-slim (Debian 12)
-# - 9.0-noble (Ubuntu 24.04)
-# - 9.0-alpine (Alpine Linux)
-# Uses the .NET SDK image for building the application
-FROM mcr.microsoft.com/dotnet/sdk:8.0-bookworm-slim AS build
-ARG BUILD_CONFIGURATION=Release
-
-WORKDIR /src
-
-# Copy project files first for better caching
-COPY ["YourProject/YourProject.csproj", "YourProject/"]
-COPY ["YourOtherProject/YourOtherProject.csproj", "YourOtherProject/"]
-
-# Copy NuGet configuration if it exists
-COPY ["NuGet.config", "."]
-
-# Restore NuGet packages
-RUN dotnet restore "YourProject/YourProject.csproj"
-
-# Copy source code
-COPY . .
-
-# Perform custom pre-build steps here, if needed
-# RUN echo "Running pre-build steps..."
-
-# Build and publish the application
-WORKDIR "/src/YourProject"
-RUN dotnet build "YourProject.csproj" -c $BUILD_CONFIGURATION -o /app/build
-
-# Publish the application
-RUN dotnet publish "YourProject.csproj" -c $BUILD_CONFIGURATION -o /app/publish /p:UseAppHost=false
-
-# Perform custom post-build steps here, if needed
-# RUN echo "Running post-build steps..."
-
-# ============================================================
-# Stage 2: Final runtime image
-# ============================================================
-
-# Base Image - Select the appropriate .NET runtime version and Linux distribution
-# Possible tags include:
-# - 8.0-bookworm-slim (Debian 12)
-# - 8.0-noble (Ubuntu 24.04)
-# - 8.0-alpine (Alpine Linux)
-# - 8.0-noble-chiseled (Ubuntu 24.04 Chiseled)
-# - 8.0-azurelinux3.0 (Azure Linux)
-# - 9.0-bookworm-slim (Debian 12)
-# - 9.0-noble (Ubuntu 24.04)
-# - 9.0-alpine (Alpine Linux)
-# - 9.0-noble-chiseled (Ubuntu 24.04 Chiseled)
-# - 9.0-azurelinux3.0 (Azure Linux)
-# Uses the .NET runtime image for running the application
-FROM mcr.microsoft.com/dotnet/aspnet:8.0-bookworm-slim AS final
-
-# Install system packages if needed (uncomment and modify as needed)
-# RUN apt-get update && apt-get install -y \
-# curl \
-# wget \
-# ca-certificates \
-# libgdiplus \
-# && rm -rf /var/lib/apt/lists/*
-
-# Install additional .NET tools if needed (uncomment and modify as needed)
-# RUN dotnet tool install --global dotnet-ef --version 8.0.0
-# ENV PATH="$PATH:/root/.dotnet/tools"
-
-WORKDIR /app
-
-# Copy published application from build stage
-COPY --from=build /app/publish .
-
-# Copy additional files if needed (uncomment and modify as needed)
-# COPY ./config/appsettings.Production.json .
-# COPY ./certificates/ ./certificates/
-
-# Set environment variables
-ENV ASPNETCORE_ENVIRONMENT=Production
-ENV ASPNETCORE_URLS=http://+:8080
-
-# Add custom environment variables if needed (uncomment and modify as needed)
-# ENV CONNECTIONSTRINGS__DEFAULTCONNECTION="your-connection-string"
-# ENV FEATURE_FLAG_ENABLED=true
-
-# Configure SSL/TLS certificates if needed (uncomment and modify as needed)
-# ENV ASPNETCORE_Kestrel__Certificates__Default__Path=/app/certificates/app.pfx
-# ENV ASPNETCORE_Kestrel__Certificates__Default__Password=your_password
-
-# Expose the port the application listens on
-EXPOSE 8080
-# EXPOSE 8081 # Uncomment if using HTTPS
-
-# Install curl for health checks if not already present
-RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
-
-# Configure health check
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
- CMD curl -f http://localhost:8080/health || exit 1
-
-# Create volumes for persistent data if needed (uncomment and modify as needed)
-# VOLUME ["/app/data", "/app/logs"]
-
-# Switch to non-root user for security
-USER $APP_UID
-
-# Set the entry point for the application
-ENTRYPOINT ["dotnet", "YourProject.dll"]
-```
-
-## Adapting this Example
-
-**Note:** Customize this template based on the specific requirements in containerization settings.
-
-When adapting this example Dockerfile:
-
-1. Replace `YourProject.csproj`, `YourProject.dll`, etc. with your actual project names
-2. Adjust the .NET version and Linux distribution as needed
-3. Modify the dependency installation steps based on your requirements and remove any unnecessary ones
-4. Configure environment variables specific to your application
-5. Add or remove stages as needed for your specific workflow
-6. Update the health check endpoint to match your application's health check route
-
-## Linux Distribution Variations
-
-### Alpine Linux
-For smaller image sizes, you can use Alpine Linux:
-
-```dockerfile
-FROM mcr.microsoft.com/dotnet/sdk:8.0-alpine AS build
-# ... build steps ...
-
-FROM mcr.microsoft.com/dotnet/aspnet:8.0-alpine AS final
-# Install packages using apk
-RUN apk update && apk add --no-cache curl ca-certificates
-```
-
-### Ubuntu Chiseled
-For minimal attack surface, consider using chiseled images:
-
-```dockerfile
-FROM mcr.microsoft.com/dotnet/aspnet:8.0-jammy-chiseled AS final
-# Note: Chiseled images have minimal packages, so you may need to use a different base for additional dependencies
-```
-
-### Azure Linux (Mariner)
-For Azure-optimized containers:
-
-```dockerfile
-FROM mcr.microsoft.com/dotnet/aspnet:8.0-azurelinux3.0 AS final
-# Install packages using tdnf
-RUN tdnf update -y && tdnf install -y curl ca-certificates && tdnf clean all
-```
-
-## Notes on Stage Naming
-
-- The `AS stage-name` syntax gives each stage a name
-- Use `--from=stage-name` to copy files from a previous stage
-- You can have multiple intermediate stages that aren't used in the final image
-- The `final` stage is the one that becomes the final container image
-
-## Security Best Practices
-
-- Always run as a non-root user in production
-- Use specific image tags instead of `latest`
-- Minimize the number of installed packages
-- Keep base images updated
-- Use multi-stage builds to exclude build dependencies from the final image
diff --git a/prompts/context-map.prompt.md b/prompts/context-map.prompt.md
deleted file mode 100644
index d3ab149a..00000000
--- a/prompts/context-map.prompt.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-agent: 'agent'
-tools: ['codebase']
-description: 'Generate a map of all files relevant to a task before making changes'
----
-
-# Context Map
-
-Before implementing any changes, analyze the codebase and create a context map.
-
-## Task
-
-{{task_description}}
-
-## Instructions
-
-1. Search the codebase for files related to this task
-2. Identify direct dependencies (imports/exports)
-3. Find related tests
-4. Look for similar patterns in existing code
-
-## Output Format
-
-```markdown
-## Context Map
-
-### Files to Modify
-| File | Purpose | Changes Needed |
-|------|---------|----------------|
-| path/to/file | description | what changes |
-
-### Dependencies (may need updates)
-| File | Relationship |
-|------|--------------|
-| path/to/dep | imports X from modified file |
-
-### Test Files
-| Test | Coverage |
-|------|----------|
-| path/to/test | tests affected functionality |
-
-### Reference Patterns
-| File | Pattern |
-|------|---------|
-| path/to/similar | example to follow |
-
-### Risk Assessment
-- [ ] Breaking changes to public API
-- [ ] Database migrations needed
-- [ ] Configuration changes required
-```
-
-Do not proceed with implementation until this map is reviewed.
diff --git a/prompts/conventional-commit.prompt.md b/prompts/conventional-commit.prompt.md
deleted file mode 100644
index 1bec5f69..00000000
--- a/prompts/conventional-commit.prompt.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-description: 'Prompt and workflow for generating conventional commit messages using a structured XML format. Guides users to create standardized, descriptive commit messages in line with the Conventional Commits specification, including instructions, examples, and validation.'
-tools: ['execute/runInTerminal', 'execute/getTerminalOutput']
----
-
-### Instructions
-
-```xml
-<instructions>
-  This file contains a prompt template for generating conventional commit messages. It provides instructions, examples, and formatting guidelines to help users write standardized, descriptive commit messages in accordance with the Conventional Commits specification.
-</instructions>
-```
-
-### Workflow
-
-**Follow these steps:**
-
-1. Run `git status` to review changed files.
-2. Run `git diff` or `git diff --cached` to inspect changes.
-3. Stage your changes with `git add <file>`.
-4. Construct your commit message using the following XML structure.
-5. After generating your commit message, Copilot will automatically run the following command in your integrated terminal (no confirmation needed):
-
-```bash
-git commit -m "type(scope): description"
-```
-
-6. Just execute this prompt and Copilot will handle the commit for you in the terminal.
-
-### Commit Message Structure
-
-```xml
-<commit-message>
-  <type>feat|fix|docs|style|refactor|perf|test|build|ci|chore|revert</type>
-  <scope>()</scope>
-  <description>A short, imperative summary of the change</description>
-  <body>(optional: more detailed explanation)</body>
-  <footer>(optional: e.g., BREAKING CHANGE or issue references)</footer>
-</commit-message>
-```
-
-### Examples
-
-```xml
-<examples>
-  <example>feat(parser): add ability to parse arrays</example>
-  <example>fix(ui): correct button alignment</example>
-  <example>docs: update README with usage instructions</example>
-  <example>refactor: improve performance of data processing</example>
-  <example>chore: update dependencies</example>
-  <example>feat!: send email on registration (BREAKING CHANGE: email service required)</example>
-</examples>
-```
-
-### Validation
-
-```xml
-<validation>
-  <type>Must be one of the allowed types. See https://www.conventionalcommits.org/en/v1.0.0/#specification</type>
-  <scope>Optional, but recommended for clarity.</scope>
-  <description>Required. Use the imperative mood (e.g., "add", not "added").</description>
-  <body>Optional. Use for additional context.</body>
-</validation>
-```
-
-### Final Step
-
-```xml
-<final-step>
-  <command>git commit -m "type(scope): description"</command>
-  <note>Replace with your constructed message. Include body and footer if needed.</note>
-</final-step>
-```
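-
-For a commit that needs a body or footer, one option (standard `git` behavior, shown here as a hedged example) is to pass `-m` multiple times; each flag becomes a separate paragraph:
-
-```bash
-git commit -m "feat(api): add pagination to list endpoints" \
-  -m "Adds page and pageSize query parameters." \
-  -m "BREAKING CHANGE: list endpoints now default to 20 items per page."
-```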
diff --git a/prompts/convert-plaintext-to-md.prompt.md b/prompts/convert-plaintext-to-md.prompt.md
deleted file mode 100644
index 4af122da..00000000
--- a/prompts/convert-plaintext-to-md.prompt.md
+++ /dev/null
@@ -1,363 +0,0 @@
----
-agent: agent
-description: 'Convert a text-based document to markdown following instructions from prompt, or if a documented option is passed, follow the instructions for that option.'
-tools: ['edit', 'edit/editFiles', 'web/fetch', 'runCommands', 'search', 'search/readFile', 'search/textSearch']
----
-
-# Convert Plaintext Documentation to Markdown
-
-## Current Role
-
-You are an expert technical documentation specialist who converts plain text or generic text-based
-documentation files to properly formatted markdown.
-
-## Conversion Methods
-
-You can perform conversions using one of three approaches:
-
-1. **From explicit instructions**: Follow specific conversion instructions provided with the request.
-2. **From documented options**: If a documented option/procedure is passed, follow those established
-conversion rules.
-3. **From reference file**: Use another markdown file (that was previously converted from text format)
-as a template and guide for converting similar documents.
-
-## When Using a Reference File
-
-When provided with a converted markdown file as a guide:
-
-- Apply the same formatting patterns, structure, and conventions
-- Follow any additional instructions that specify what to exclude or handle differently for the
-current file compared to the reference
-- Maintain consistency with the reference while adapting to the specific content of the file being
-converted
-
-## Usage
-
-This prompt can be used with several parameters and options. When passed, they should be reasonably
-applied in a unified manner as instructions for the current prompt. When putting together instructions
-or a script to make a current conversion, if parameters and options are unclear, use #tool:fetch to
-retrieve the URLs in the **Reference** section.
-
-```bash
-/convert-plaintext-to-md <#file:{{file}}> [finalize] [guide #file:{{reference-file}}] [instructions] [platform={{name}}] [options] [pre=<name>]
-```
-
-### Parameters
-
-- **#file:{{file}}** (required) - The plain or generic text documentation file to convert to markdown.
-If a corresponding `{{file}}.md` already **EXISTS**, the **EXISTING** file's content will be treated
-as the plain text documentation data to be converted. If one **DOES NOT EXIST**, **CREATE NEW MARKDOWN**
-by copying the original plaintext documentation file (e.g., `copy FILE FILE.md`) in the same directory
-as the plain text documentation file.
-- **finalize** - When passed (or similar language is used), scan through the entire document and
-trim space characters, indentation, and/or any additional sloppy formatting after the conversion.
-- **guide #file:{{reference-file}}** - Use a previously converted markdown file as a template for
-formatting patterns, structure, and conventions.
-- **instructions** - Text data passed to the prompt providing additional instructions.
-- **platform={{name}}** - Specify the target platform for markdown rendering to ensure compatibility:
- - **GitHub** (default) - GitHub-flavored markdown (GFM) with tables, task lists, strikethrough,
- and alerts
- - **StackOverflow** - CommonMark with StackOverflow-specific extensions
- - **VS Code** - Optimized for VS Code's markdown preview renderer
- - **GitLab** - GitLab-flavored markdown with platform-specific features
- - **CommonMark** - Standard CommonMark specification
-
-### Options
-
-- **--header [1-4]** - Add markdown header tags to the document:
- - **[1-4]** - Specifies the header level to add (# through ####)
- - **#selection** - Data used to:
- - Identify sections where updates should be applied
- - Serve as a guide for applying headers to other sections or the entire document
- - **Auto-apply** (if none provided) - Add headers based on content structure
-- **-p, --pattern** - Follow an existing pattern from:
- - **#selection** - A selected pattern to follow when updating the file or a portion of it
- - **IMPORTANT**: DO NOT only edit the selection when passed to `{{[-p, --pattern]}}`
- - **NOTE**: The selection is **NOT** the **WORKING RANGE**
- - Identify pattern(s) from the selection
- - **Stopping Points**:
- - If `{{[-s, --stop]}} eof` is passed or no clear endpoint is specified, convert to end of file
- - If `-s [0-9]+` is passed, convert to the line number specified in the regex `[0-9]+`
- - **Prompt instructions** - Instructional data passed with the prompt
- - **Auto-detect** (if none provided) - Identify existing patterns in the file by:
- - Analyzing where patterns occur
- - Identifying data that does not match the pattern
- - Applying patterns from one section to corresponding sections where the pattern is missing
-- **-s, --stop <[0-9]+ | eof>**
- - **[0-9]+** - Line number to stop the **current** markdown conversion at
- - **eof** - If passed, or any other text clearly indicating **end of file**, convert to end of file
-
-### Predefined Instructions
-
-If any of the predefined instructions are passed as an argument, expand and use them as **ADDITIONAL**
-input for the prompt instructions. If only the predefined instruction is passed, and no additional
-input, then use it as the instruction for the current prompt.
-
-#### Syntax
-
-```bash
-/convert-plaintext-to-md pre=<name>
-```
-
-#### Predefined
-
-- **rm-head-digits** - Remove any prepending numbers from the headers when updating or converting the
-plaintext to markdown.
-- **mv-head-level(x, y)** - Change the heading level from level `x` header to a level `y` header when
-updating or converting plaintext to markdown.
-- **rm-indent(x)** - Decrease the indentation of paragraphs or raw text data portions of the file by
-`x` when updating or converting plaintext to markdown.
-
-> [!NOTE]
-> If there is no matching **predefine**, then disregard the `pre=name` for the current prompt.
-
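-For example, a hypothetical invocation combining a file with a predefined instruction:
-
-```bash
-/convert-plaintext-to-md #NOTES.md pre=rm-head-digits
-```
-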
-### Usage Example
-
-#### Basic Use
-
-**User**
-
-```bash
-/convert-plaintext-to-md #file
-```
-
-**Agent**
-
-```
-I see there is no corresponding `file.md`. Let me copy the plaintext document:
-
- ```bash
- cp file file.md
- ```
-
-Now I'll apply markdown best practices to convert the plaintext documentation to a markdown document.
-```
-
-#### Using a Guide to Convert to Markdown
-
-**User**
-
-```bash
-/convert-plaintext-to-md #file.md guide #CODE.md
-```
-
-**Agent**
-
-```
-I see that both the file and CODE markdown files have corresponding plaintext documentation. Let me
-check CODE and compare it to CODE.md, and identify patterns to help with conversion.
-
- CODE
- ```text
- Title
-
- Text in opening.
-
- 1. Section
- 1.1 Section Item
-
- =============================================
-
- 1. Section
- 1.1
- Full details of section item.
- ```
-
-And looking at CODE.md:
-
- CODE.md
- ```markdown
- # Title
-
- Text in opening.
-
- ### [Section](#1-section)
-
- - [1.1](#11) Section Item but Only Numbers in Sub-heading
-
- ***
-
- ## 1. Section
-
- ### 1.1
-
- Full details of section item.
- ```
-
-I see file.md exists and matches the text in file.
-
-I'll convert file.md similarly. Let's look at the file text documentation:
-
- file
- ```text
- Title
-
- 1. Command
- 1.10.0 Options
-
- ---------------------------------------------
-
- Command
- 1.10.0
-    Info on the command's options.
- ```
-
-OK, after analyzing the guide file, I've updated file.md to:
-
- file.md
- ```markdown
- # Title
-
- ### [1. Command](#command)
- - [1.10.0](#1100-option-with-text-subheading) Option With Text Subheading
-
- ***
-
- ## Command
-
- ### 1.10.0 Option With Text Subheading
-    Info on the command's options.
- ```
-```
-
-#### Finalize is Clearly Triggered
-
-**User**
-
-```bash
-/convert-plaintext-to-md #file.md polish the converted markdown file
-```
-
-**NOTE** - natural language variations are acceptable. Hence the use of "polish the converted markdown file".
-
-**Agent**
-
-```
-I'll finalize file.md. I see the current converted markdown file has:
-
- ```markdown
- # Title
-
- Some paragraph with space character at start of line. This should be escaped.
-
- - Some list item with extra space character.
- - Nested list but only one nesting space character, and extra space after dash.
- if (a == 1) {
- return "Indented code block without enough space characters.";
- } else {
- return "Passing `finalize` or similar should resolve this.";
- }
-
- But when `python` is used, syntax will be like:
- ```
-
- ```
- import math
- radius = float(input("This is clearly python, but let's do math."))
- area = math.pi * radius**2
- circumference = 2 * math.pi * radius
- print(f"Area: {area:.2f}, Circumference: {circumference:.2f}")
- ```
-
-Let me trim a few things:
-
- ```markdown
- # Title
-
- Some paragraph with space character at start of line. This \ should be escaped.
-
- - Some list item with extra space character.
- - Nested list but only one space character.
-
- if (a == 1) {
- return "Indented code block without enough space characters.";
- } else {
- return "Passing `finalize` or similar should resolve this.";
- }
-
- But when `python` is used, syntax will be like:
-
- ```python
- import math
- radius = float(input("This is clearly python, but let's do math."))
- area = math.pi * radius**2
- circumference = 2 * math.pi * radius
- print(f"Area: {area:.2f}, Circumference: {circumference:.2f}")
- ```
-```
-
-#### No Existing Markdown File and Inline shorthand Instructions
-
-**User**
-
-```bash
-/convert-plaintext-to-md #BUGS -p "regex()=> ^ {1,}([0-9]+\.[0-9]+\.[0-9]+) to ^### $1"
-```
-
-**Agent**
-
-```
-I see that there is no `BUGS.md` file. Let me copy the current plaintext BUGS documentation:
-
- ```bash
- copy BUGS BUGS.md
- ```
-
-Now I'll update the copied file BUGS.md, converting it to proper markdown.
-Let me read the current contents:
-
- ```markdown
- 1 Section
- 1.1 Section Item
- 1.2 Section Item
- ...
- 1.12 Section Item
- ```
-
-OK, I've matched the shorthand regular expression pattern and updated the BUGS.md to:
-
- ```markdown
- 1 Section
- ### 1.1 Section Item
- ### 1.2 Section Item
- ### ...
- ### 1.12 Section Item
- ```
-```
-
-## Considerations
-
-### Patterns
-
-- Line indentation
-- Identifying indented code blocks
-- Identifying fenced code blocks
- - Identifying programming language for code blocks
-- When converting, do not stop the process when procedures regarding `exit()` and ending tasks are documented; treat them as documented content, not commands to execute.
- - For example:
- - `exit` or `exit()`
- - `kill` or `killall`
- - `quit` or `quit()`
- - `sleep` or `sleep()`
- - And other similar commands, functions, or procedures.
-
-> [!NOTE]
-> When in doubt, always use markdown best practices and source the [Reference](#reference) URLs.
-
-## Goal
-
-- Preserve all technical content accurately
-- Maintain proper markdown syntax and formatting (see references below)
-- Ensure headers, lists, code blocks, and other elements are correctly structured
-- Keep the document readable and well-organized
-- Assemble a unified set of instructions or script to convert text to markdown using all parameters
-and options provided
-
-### Reference
-
-- #fetch → https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax
-- #fetch → https://www.markdownguide.org/extended-syntax/
-- #fetch → https://learn.microsoft.com/en-us/azure/devops/project/wiki/markdown-guidance?view=azure-devops
-
-> [!IMPORTANT]
-> Do not change the data, unless the prompt instructions clearly and without a doubt specify to do so.
diff --git a/prompts/copilot-instructions-blueprint-generator.prompt.md b/prompts/copilot-instructions-blueprint-generator.prompt.md
deleted file mode 100644
index 7dd6bd4e..00000000
--- a/prompts/copilot-instructions-blueprint-generator.prompt.md
+++ /dev/null
@@ -1,294 +0,0 @@
----
-description: 'Technology-agnostic blueprint generator for creating comprehensive copilot-instructions.md files that guide GitHub Copilot to produce code consistent with project standards, architecture patterns, and exact technology versions by analyzing existing codebase patterns and avoiding assumptions.'
-agent: 'agent'
----
-
-# Copilot Instructions Blueprint Generator
-
-## Configuration Variables
-${PROJECT_TYPE="Auto-detect|.NET|Java|JavaScript|TypeScript|React|Angular|Python|Multiple|Other"}
-${ARCHITECTURE_STYLE="Layered|Microservices|Monolithic|Domain-Driven|Event-Driven|Serverless|Mixed"}
-${CODE_QUALITY_FOCUS="Maintainability|Performance|Security|Accessibility|Testability|All"}
-${DOCUMENTATION_LEVEL="Minimal|Standard|Comprehensive"}
-${TESTING_REQUIREMENTS="Unit|Integration|E2E|TDD|BDD|All"}
-${VERSIONING="Semantic|CalVer|Custom"}
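-
-For instance, a hypothetical configuration (values are substituted into the variables above before the prompt below is emitted):
-
-```text
-PROJECT_TYPE=TypeScript
-ARCHITECTURE_STYLE=Layered
-CODE_QUALITY_FOCUS=All
-DOCUMENTATION_LEVEL=Standard
-TESTING_REQUIREMENTS=Unit
-VERSIONING=Semantic
-```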
-
-## Generated Prompt
-
-"Generate a comprehensive copilot-instructions.md file that will guide GitHub Copilot to produce code consistent with our project's standards, architecture, and technology versions. The instructions must be strictly based on actual code patterns in our codebase and avoid making any assumptions. Follow this approach:
-
-### 1. Core Instruction Structure
-
-```markdown
-# GitHub Copilot Instructions
-
-## Priority Guidelines
-
-When generating code for this repository:
-
-1. **Version Compatibility**: Always detect and respect the exact versions of languages, frameworks, and libraries used in this project
-2. **Context Files**: Prioritize patterns and standards defined in the .github/copilot directory
-3. **Codebase Patterns**: When context files don't provide specific guidance, scan the codebase for established patterns
-4. **Architectural Consistency**: Maintain our ${ARCHITECTURE_STYLE} architectural style and established boundaries
-5. **Code Quality**: Prioritize ${CODE_QUALITY_FOCUS == "All" ? "maintainability, performance, security, accessibility, and testability" : CODE_QUALITY_FOCUS} in all generated code
-
-## Technology Version Detection
-
-Before generating code, scan the codebase to identify:
-
-1. **Language Versions**: Detect the exact versions of programming languages in use
- - Examine project files, configuration files, and package managers
- - Look for language-specific version indicators (e.g., in .NET projects)
- - Never use language features beyond the detected version
-
-2. **Framework Versions**: Identify the exact versions of all frameworks
- - Check package.json, .csproj, pom.xml, requirements.txt, etc.
- - Respect version constraints when generating code
- - Never suggest features not available in the detected framework versions
-
-3. **Library Versions**: Note the exact versions of key libraries and dependencies
- - Generate code compatible with these specific versions
- - Never use APIs or features not available in the detected versions
-
-## Context Files
-
-Prioritize the following files in .github/copilot directory (if they exist):
-
-- **architecture.md**: System architecture guidelines
-- **tech-stack.md**: Technology versions and framework details
-- **coding-standards.md**: Code style and formatting standards
-- **folder-structure.md**: Project organization guidelines
-- **exemplars.md**: Exemplary code patterns to follow
-
-## Codebase Scanning Instructions
-
-When context files don't provide specific guidance:
-
-1. Identify similar files to the one being modified or created
-2. Analyze patterns for:
- - Naming conventions
- - Code organization
- - Error handling
- - Logging approaches
- - Documentation style
- - Testing patterns
-
-3. Follow the most consistent patterns found in the codebase
-4. When conflicting patterns exist, prioritize patterns in newer files or files with higher test coverage
-5. Never introduce patterns not found in the existing codebase
-
-## Code Quality Standards
-
-${CODE_QUALITY_FOCUS.includes("Maintainability") || CODE_QUALITY_FOCUS == "All" ? `### Maintainability
-- Write self-documenting code with clear naming
-- Follow the naming and organization conventions evident in the codebase
-- Follow established patterns for consistency
-- Keep functions focused on single responsibilities
-- Limit function complexity and length to match existing patterns` : ""}
-
-${CODE_QUALITY_FOCUS.includes("Performance") || CODE_QUALITY_FOCUS == "All" ? `### Performance
-- Follow existing patterns for memory and resource management
-- Match existing patterns for handling computationally expensive operations
-- Follow established patterns for asynchronous operations
-- Apply caching consistently with existing patterns
-- Optimize according to patterns evident in the codebase` : ""}
-
-${CODE_QUALITY_FOCUS.includes("Security") || CODE_QUALITY_FOCUS == "All" ? `### Security
-- Follow existing patterns for input validation
-- Apply the same sanitization techniques used in the codebase
-- Use parameterized queries matching existing patterns
-- Follow established authentication and authorization patterns
-- Handle sensitive data according to existing patterns` : ""}
-
-${CODE_QUALITY_FOCUS.includes("Accessibility") || CODE_QUALITY_FOCUS == "All" ? `### Accessibility
-- Follow existing accessibility patterns in the codebase
-- Match ARIA attribute usage with existing components
-- Maintain keyboard navigation support consistent with existing code
-- Follow established patterns for color and contrast
-- Apply text alternative patterns consistent with the codebase` : ""}
-
-${CODE_QUALITY_FOCUS.includes("Testability") || CODE_QUALITY_FOCUS == "All" ? `### Testability
-- Follow established patterns for testable code
-- Match dependency injection approaches used in the codebase
-- Apply the same patterns for managing dependencies
-- Follow established mocking and test double patterns
-- Match the testing style used in existing tests` : ""}
-
-## Documentation Requirements
-
-${DOCUMENTATION_LEVEL == "Minimal" ?
-`- Match the level and style of comments found in existing code
-- Document according to patterns observed in the codebase
-- Follow existing patterns for documenting non-obvious behavior
-- Use the same format for parameter descriptions as existing code` : ""}
-
-${DOCUMENTATION_LEVEL == "Standard" ?
-`- Follow the exact documentation format found in the codebase
-- Match the XML/JSDoc style and completeness of existing comments
-- Document parameters, returns, and exceptions in the same style
-- Follow existing patterns for usage examples
-- Match class-level documentation style and content` : ""}
-
-${DOCUMENTATION_LEVEL == "Comprehensive" ?
-`- Follow the most detailed documentation patterns found in the codebase
-- Match the style and completeness of the best-documented code
-- Document exactly as the most thoroughly documented files do
-- Follow existing patterns for linking documentation
-- Match the level of detail in explanations of design decisions` : ""}
-
-## Testing Approach
-
-${TESTING_REQUIREMENTS.includes("Unit") || TESTING_REQUIREMENTS == "All" ?
-`### Unit Testing
-- Match the exact structure and style of existing unit tests
-- Follow the same naming conventions for test classes and methods
-- Use the same assertion patterns found in existing tests
-- Apply the same mocking approach used in the codebase
-- Follow existing patterns for test isolation` : ""}
-
-${TESTING_REQUIREMENTS.includes("Integration") || TESTING_REQUIREMENTS == "All" ?
-`### Integration Testing
-- Follow the same integration test patterns found in the codebase
-- Match existing patterns for test data setup and teardown
-- Use the same approach for testing component interactions
-- Follow existing patterns for verifying system behavior` : ""}
-
-${TESTING_REQUIREMENTS.includes("E2E") || TESTING_REQUIREMENTS == "All" ?
-`### End-to-End Testing
-- Match the existing E2E test structure and patterns
-- Follow established patterns for UI testing
-- Apply the same approach for verifying user journeys` : ""}
-
-${TESTING_REQUIREMENTS.includes("TDD") || TESTING_REQUIREMENTS == "All" ?
-`### Test-Driven Development
-- Follow TDD patterns evident in the codebase
-- Match the progression of test cases seen in existing code
-- Apply the same refactoring patterns after tests pass` : ""}
-
-${TESTING_REQUIREMENTS.includes("BDD") || TESTING_REQUIREMENTS == "All" ?
-`### Behavior-Driven Development
-- Match the existing Given-When-Then structure in tests
-- Follow the same patterns for behavior descriptions
-- Apply the same level of business focus in test cases` : ""}
-
-## Technology-Specific Guidelines
-
-${PROJECT_TYPE == ".NET" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### .NET Guidelines
-- Detect and strictly adhere to the specific .NET version in use
-- Use only C# language features compatible with the detected version
-- Follow LINQ usage patterns exactly as they appear in the codebase
-- Match async/await usage patterns from existing code
-- Apply the same dependency injection approach used in the codebase
-- Use the same collection types and patterns found in existing code` : ""}
-
-${PROJECT_TYPE == "Java" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### Java Guidelines
-- Detect and adhere to the specific Java version in use
-- Follow the exact same design patterns found in the codebase
-- Match exception handling patterns from existing code
-- Use the same collection types and approaches found in the codebase
-- Apply the dependency injection patterns evident in existing code` : ""}
-
-${PROJECT_TYPE == "JavaScript" || PROJECT_TYPE == "TypeScript" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### JavaScript/TypeScript Guidelines
-- Detect and adhere to the specific ECMAScript/TypeScript version in use
-- Follow the same module import/export patterns found in the codebase
-- Match TypeScript type definitions with existing patterns
-- Use the same async patterns (promises, async/await) as existing code
-- Follow error handling patterns from similar files` : ""}
-
-${PROJECT_TYPE == "React" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### React Guidelines
-- Detect and adhere to the specific React version in use
-- Match component structure patterns from existing components
-- Follow the same hooks and lifecycle patterns found in the codebase
-- Apply the same state management approach used in existing components
-- Match prop typing and validation patterns from existing code` : ""}
-
-${PROJECT_TYPE == "Angular" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### Angular Guidelines
-- Detect and adhere to the specific Angular version in use
-- Follow the same component and module patterns found in the codebase
-- Match decorator usage exactly as seen in existing code
-- Apply the same RxJS patterns found in the codebase
-- Follow existing patterns for component communication` : ""}
-
-${PROJECT_TYPE == "Python" || PROJECT_TYPE == "Auto-detect" || PROJECT_TYPE == "Multiple" ? `### Python Guidelines
-- Detect and adhere to the specific Python version in use
-- Follow the same import organization found in existing modules
-- Match type hinting approaches if used in the codebase
-- Apply the same error handling patterns found in existing code
-- Follow the same module organization patterns` : ""}
-
-## Version Control Guidelines
-
-${VERSIONING == "Semantic" ?
-`- Follow Semantic Versioning patterns as applied in the codebase
-- Match existing patterns for documenting breaking changes
-- Follow the same approach for deprecation notices` : ""}
-
-${VERSIONING == "CalVer" ?
-`- Follow Calendar Versioning patterns as applied in the codebase
-- Match existing patterns for documenting changes
-- Follow the same approach for highlighting significant changes` : ""}
-
-${VERSIONING == "Custom" ?
-`- Match the exact versioning pattern observed in the codebase
-- Follow the same changelog format used in existing documentation
-- Apply the same tagging conventions used in the project` : ""}
-
-## General Best Practices
-
-- Follow naming conventions exactly as they appear in existing code
-- Match code organization patterns from similar files
-- Apply error handling consistent with existing patterns
-- Follow the same approach to testing as seen in the codebase
-- Match logging patterns from existing code
-- Use the same approach to configuration as seen in the codebase
-
-## Project-Specific Guidance
-
-- Scan the codebase thoroughly before generating any code
-- Respect existing architectural boundaries without exception
-- Match the style and patterns of surrounding code
-- When in doubt, prioritize consistency with existing code over external best practices
-```
-
-### 2. Codebase Analysis Instructions
-
-To create the copilot-instructions.md file, first analyze the codebase to:
-
-1. **Identify Exact Technology Versions**:
- - ${PROJECT_TYPE == "Auto-detect" ? "Detect all programming languages, frameworks, and libraries by scanning file extensions and configuration files" : `Focus on ${PROJECT_TYPE} technologies`}
- - Extract precise version information from project files, package.json, .csproj, etc.
- - Document version constraints and compatibility requirements
-
-2. **Understand Architecture**:
- - Analyze folder structure and module organization
- - Identify clear layer boundaries and component relationships
- - Document communication patterns between components
-
-3. **Document Code Patterns**:
- - Catalog naming conventions for different code elements
- - Note documentation styles and completeness
- - Document error handling patterns
- - Map testing approaches and coverage
-
-4. **Note Quality Standards**:
- - Identify performance optimization techniques actually used
- - Document security practices implemented in the code
- - Note accessibility features present (if applicable)
- - Document code quality patterns evident in the codebase
-
-### 3. Implementation Notes
-
-The final copilot-instructions.md should:
-- Be placed in the .github/copilot directory
-- Reference only patterns and standards that exist in the codebase
-- Include explicit version compatibility requirements
-- Avoid prescribing any practices not evident in the code
-- Provide concrete examples from the codebase
-- Be comprehensive yet concise enough for Copilot to effectively use
-
-Important: Only include guidance based on patterns actually observed in the codebase. Explicitly instruct Copilot to prioritize consistency with existing code over external best practices or newer language features.
-"
-
-## Expected Output
-
-A comprehensive copilot-instructions.md file that will guide GitHub Copilot to produce code that is perfectly compatible with your existing technology versions and follows your established patterns and architecture.
\ No newline at end of file
diff --git a/prompts/cosmosdb-datamodeling.prompt.md b/prompts/cosmosdb-datamodeling.prompt.md
deleted file mode 100644
index 0c56405d..00000000
--- a/prompts/cosmosdb-datamodeling.prompt.md
+++ /dev/null
@@ -1,1045 +0,0 @@
----
-agent: 'agent'
-description: 'Step-by-step guide for capturing key application requirements for a NoSQL use case and producing an Azure Cosmos DB NoSQL data model design using best practices and common patterns. Artifacts produced: "cosmosdb_requirements.md" and "cosmosdb_data_model.md"'
-model: 'Claude Sonnet 4'
----
-# Azure Cosmos DB NoSQL Data Modeling Expert System Prompt
-
-- version: 1.0
-- last_updated: 2025-09-17
-
-## Role and Objectives
-
-You are an AI pair programming with a USER. Your goal is to help the USER create an Azure Cosmos DB NoSQL data model by:
-
-- Gathering the USER's application details, access-pattern requirements, volumetrics, and concurrency details of the workload, and documenting them in the `cosmosdb_requirements.md` file
-- Designing a Cosmos DB NoSQL model using the Core Philosophy and Design Patterns from this document, and saving it to the `cosmosdb_data_model.md` file
-
-🔴 **CRITICAL**: You MUST limit the number of questions you ask at any given time: aim for one question, or AT MOST three related questions.
-
-🔴 **MASSIVE SCALE WARNING**: When users mention extremely high write volumes (>10k writes/sec), batch processing of several million records in a short period of time, or "massive scale" requirements, IMMEDIATELY ask about:
-1. **Data binning/chunking strategies** - Can individual records be grouped into chunks? (See the sketch after this list.)
-2. **Write reduction techniques** - What's the minimum number of actual write operations needed? Do all writes need to be individually processed or can they be batched?
-3. **Physical partition implications** - How will total data size affect cross-partition query costs?
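-
-As a hedged illustration of binning (field names and the chunking scheme are hypothetical): instead of writing each reading as its own document, a chunk document groups a time window of readings into one write.
-
-```json
-{
-  "id": "device42_2025-09-17T10",
-  "partitionKey": "device42",
-  "type": "readingChunk",
-  "windowStart": "2025-09-17T10:00:00Z",
-  "readings": [
-    { "ts": "2025-09-17T10:00:01Z", "value": 21.4 },
-    { "ts": "2025-09-17T10:00:02Z", "value": 21.5 }
-  ]
-}
-```
-
-One ~2KB chunk write (on the order of 10 RU) can replace hundreds of individual small writes of ~5 RU each, which is often the difference between a feasible and an infeasible write budget at massive scale.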
-
-## Documentation Workflow
-
-🔴 CRITICAL FILE MANAGEMENT:
-You MUST maintain two markdown files throughout our conversation, treating cosmosdb_requirements.md as your working scratchpad and cosmosdb_data_model.md as the final deliverable.
-
-### Primary Working File: cosmosdb_requirements.md
-
-Update Trigger: After EVERY USER message that provides new information
-Purpose: Capture all details, evolving thoughts, and design considerations as they emerge
-
-📝 Template for cosmosdb_requirements.md:
-
-```markdown
-# Azure Cosmos DB NoSQL Modeling Session
-
-## Application Overview
-- **Domain**: [e.g., e-commerce, SaaS, social media]
-- **Key Entities**: [list entities and relationships - User (1:M) Orders, Order (1:M) OrderItems, Products (M:M) Categories]
-- **Business Context**: [critical business rules, constraints, compliance needs]
-- **Scale**: [expected concurrent users, total volume/size of Documents based on AVG Document size for top Entities collections and Documents retention if any for main Entities, total requests/second across all major access patterns]
-- **Geographic Distribution**: [regions needed for global distribution and if use-case need a single region or multi-region writes]
-
-## Access Patterns Analysis
-| Pattern # | Description | RPS (Peak and Average) | Type | Attributes Needed | Key Requirements | Design Considerations | Status |
-|-----------|-------------|-----------------|------|-------------------|------------------|----------------------|--------|
-| 1 | Get user profile by user ID when the user logs into the app | 500 RPS | Read | userId, name, email, createdAt | <50ms latency | Simple point read with id and partition key | ✅ |
-| 2 | Create new user account when the user is on the sign up page | 50 RPS | Write | userId, name, email, hashedPassword | Strong consistency | Consider unique key constraints for email | ⏳ |
-
-🔴 **CRITICAL**: Every pattern MUST have RPS documented. If USER doesn't know, help estimate based on business context.
-
-## Entity Relationships Deep Dive
-- **User → Orders**: 1:Many (avg 5 orders per user, max 1000)
-- **Order → OrderItems**: 1:Many (avg 3 items per order, max 50)
-- **Product → OrderItems**: 1:Many (popular products in many orders)
-- **Products and Categories**: Many:Many (products exist in multiple categories, and categories have many products)
-
-## Enhanced Aggregate Analysis
-For each potential aggregate, analyze:
-
-### [Entity1 + Entity2] Container Item Analysis
-- **Access Correlation**: [X]% of queries need both entities together
-- **Query Patterns**:
- - Entity1 only: [X]% of queries
- - Entity2 only: [X]% of queries
- - Both together: [X]% of queries
-- **Size Constraints**: Combined max size [X]MB, growth pattern
-- **Update Patterns**: [Independent/Related] update frequencies
-- **Decision**: [Single Document/Multi-Document Container/Separate Containers]
-- **Justification**: [Reasoning based on access correlation and constraints]
-
-### Identifying Relationship Check
-For each parent-child relationship, verify:
-- **Child Independence**: Can child entity exist without parent?
-- **Access Pattern**: Do you always have parent_id when querying children?
-- **Current Design**: Are you planning cross-partition queries for parent→child queries?
-
-If answers are No/Yes/Yes → Use identifying relationship (partition key=parent_id) instead of separate container with cross-partition queries.
-
-Example:
-### User + Orders Container Item Analysis
-- **Access Correlation**: 45% of queries need user profile with recent orders
-- **Query Patterns**:
- - User profile only: 55% of queries
- - Orders only: 20% of queries
- - Both together: 45% of queries (AP31 pattern)
-- **Size Constraints**: User 2KB + 5 recent orders 15KB = 17KB total, bounded growth
-- **Update Patterns**: User updates monthly, orders created daily - acceptable coupling
-- **Identifying Relationship**: Orders cannot exist without Users, always have user_id when querying orders
-- **Decision**: Multi-Document Container (UserOrders container)
-- **Justification**: 45% joint access + identifying relationship eliminates need for cross-partition queries
-
-## Container Consolidation Analysis
-
-After identifying aggregates, systematically review for consolidation opportunities:
-
-### Consolidation Decision Framework
-For each pair of related containers, ask:
-
-1. **Natural Parent-Child**: Does one entity always belong to another? (Order belongs to User)
-2. **Access Pattern Overlap**: Do they serve overlapping access patterns?
-3. **Partition Key Alignment**: Could child use parent_id as partition key?
-4. **Size Constraints**: Will consolidated size stay reasonable?
-
-### Consolidation Candidates Review
-| Parent | Child | Relationship | Access Overlap | Consolidation Decision | Justification |
-|--------|-------|--------------|----------------|------------------------|---------------|
-| [Parent] | [Child] | 1:Many | [Overlap] | ✅/❌ Consolidate/Separate | [Why] |
-
-### Consolidation Rules
-- **Consolidate when**: >50% access overlap + natural parent-child + bounded size + identifying relationship
-- **Keep separate when**: <30% access overlap OR unbounded growth OR independent operations
-- **Consider carefully**: 30-50% overlap - analyze cost vs complexity trade-offs
-
-## Design Considerations (Subject to Change)
-- **Hot Partition Concerns**: [Analysis of high RPS patterns]
-- **Large Fan-Out Concerns (Many Physical Partitions from Total Data Size)**: [Analysis of the overhead a high number of physical partitions adds to any cross-partition query]
-- **Cross-Partition Query Costs**: [Cost vs performance trade-offs]
-- **Indexing Strategy**: [Composite indexes, included paths, excluded paths]
-- **Multi-Document Opportunities**: [Entity pairs with 30-70% access correlation]
-- **Multi-Entity Query Patterns**: [Patterns retrieving multiple related entities]
-- **Denormalization Ideas**: [Attribute duplication opportunities]
-- **Global Distribution**: [Multi-region write patterns and consistency levels]
-
-## Validation Checklist
-- [ ] Application domain and scale documented ✅
-- [ ] All entities and relationships mapped ✅
-- [ ] Aggregate boundaries identified based on access patterns ✅
-- [ ] Identifying relationships checked for consolidation opportunities ✅
-- [ ] Container consolidation analysis completed ✅
-- [ ] Every access pattern has: RPS (avg/peak), latency SLO, consistency level, expected result size, document size band
-- [ ] Write pattern exists for every read pattern (and vice versa) unless USER explicitly declines ✅
-- [ ] Hot partition risks evaluated ✅
-- [ ] Consolidation framework applied; candidates reviewed
-- [ ] Design considerations captured (subject to final validation) ✅
-```
-
-### Multi-Document vs Separate Containers Decision Framework
-
-When entities have 30-70% access correlation, choose between:
-
-**Multi-Document Container (Same Container, Different Document Types):**
-- ✅ Use when: Frequent joint queries, related entities, acceptable operational coupling
-- ✅ Benefits: Single query retrieval, reduced latency, cost savings, transactional consistency
-- ❌ Drawbacks: Shared throughput, operational coupling, complex indexing
-
-**Separate Containers:**
-- ✅ Use when: Independent scaling needs, different operational requirements
-- ✅ Benefits: Clean separation, independent throughput, specialized optimization
-- ❌ Drawbacks: Cross-partition queries, higher latency, increased cost
-
-**Enhanced Decision Criteria:**
-- **>70% correlation + bounded size + related operations** → Multi-Document Container
-- **50-70% correlation** → Analyze operational coupling:
-  - Same backup/restore needs? → Multi-Document Container
-  - Different scaling patterns? → Separate Containers
-  - Different consistency requirements? → Separate Containers
-- **<50% correlation** → Separate Containers
-- **Identifying relationship present** → Strong Multi-Document Container candidate
-
-🔴 CRITICAL: Stay in this section until the USER tells you to move on. Keep asking about other requirements. Capture all reads and writes. For example, ask: "Do you have any other access patterns to discuss? I see we have a user login access pattern but no pattern to create users. Should we add one?"
-
-### Final Deliverable: cosmosdb_data_model.md
-
-Creation Trigger: Only after USER confirms all access patterns captured and validated
-Purpose: Step-by-step reasoned final design with complete justifications
-
-📝 Template for cosmosdb_data_model.md:
-
-```markdown
-# Azure Cosmos DB NoSQL Data Model
-
-## Design Philosophy & Approach
-[Explain the overall approach taken and key design principles applied, including aggregate-oriented design decisions]
-
-## Aggregate Design Decisions
-[Explain how you identified aggregates based on access patterns and why certain data was grouped together or kept separate]
-
-## Container Designs
-
-🔴 **CRITICAL**: You MUST group indexes with the containers they belong to.
-
-### [ContainerName] Container
-
-A JSON representation showing 5-10 representative documents for the container
-
-```json
-[
- {
- "id": "user_123",
- "partitionKey": "user_123",
- "type": "user",
- "name": "John Doe",
- "email": "john@example.com"
- },
- {
- "id": "order_456",
- "partitionKey": "user_123",
- "type": "order",
- "userId": "user_123",
- "amount": 99.99
- }
-]
-```
-
-- **Purpose**: [what this container stores and why this design was chosen]
-- **Aggregate Boundary**: [what data is grouped together in this container and why]
-- **Partition Key**: [field] - [detailed justification including distribution reasoning, whether it's an identifying relationship and if so why]
-- **Document Types**: [list document type patterns and their semantics; e.g., `user`, `order`, `payment`]
-- **Attributes**: [list all key attributes with data types]
-- **Access Patterns Served**: [Pattern #1, #3, #7 - reference the numbered patterns]
-- **Throughput Planning**: [RU/s requirements and autoscale strategy]
-- **Consistency Level**: [Session/Eventual/Strong - with justification]
-
-### Indexing Strategy
-- **Indexing Policy**: [Automatic/Manual - with justification]
-- **Included Paths**: [specific paths that need indexing for query performance]
-- **Excluded Paths**: [paths excluded to reduce RU consumption and storage]
-- **Composite Indexes**: [multi-property indexes for ORDER BY and complex filters]
- ```json
- {
- "compositeIndexes": [
- [
- { "path": "/userId", "order": "ascending" },
- { "path": "/timestamp", "order": "descending" }
- ]
- ]
- }
- ```
-- **Access Patterns Served**: [Pattern #2, #5 - specific pattern references]
-- **RU Impact**: [expected RU consumption and optimization reasoning]
-
-## Access Pattern Mapping
-### Solved Patterns
-
-🔴 CRITICAL: List both writes and reads solved.
-
-[Show how each pattern maps to container operations and critical implementation notes]
-
-| Pattern | Description | Containers/Indexes | Cosmos DB Operations | Implementation Notes |
-|---------|-----------|---------------|-------------------|---------------------|
-
-## Hot Partition Analysis
-- **MainContainer**: Pattern #1 at 500 RPS distributed across ~10K users = 0.05 RPS per partition ✅
-- **Container-2**: Pattern #4 filtering by status could concentrate on "ACTIVE" status - **Mitigation**: Add random suffix to partition key
-
-## Trade-offs and Optimizations
-
-[Explain the overall trade-offs made and optimizations used as well as why - such as the examples below]
-
-- **Aggregate Design**: Kept Orders and OrderItems together due to 95% access correlation - trades document size for query performance
-- **Denormalization**: Duplicated user name in Order document to avoid cross-partition lookup - trades storage for performance
-- **Normalization**: Kept User as separate document type from Orders due to low access correlation (15%) - optimizes update costs
-- **Indexing Strategy**: Used selective indexing instead of automatic to balance cost vs additional query needs
-- **Multi-Document Containers**: Used multi-document containers for [access_pattern] to enable transactional consistency
-
-## Global Distribution Strategy
-
-- **Multi-Region Setup**: [regions selected and reasoning]
-- **Consistency Levels**: [per-operation consistency choices]
-- **Conflict Resolution**: [policy selection and custom resolution procedures]
-- **Regional Failover**: [automatic vs manual failover strategy]
-
-## Validation Results 🔴
-
-- [ ] Reasoned step-by-step through design decisions, applying Important Cosmos DB Context, Core Design Philosophy, and optimizing using Design Patterns ✅
-- [ ] Aggregate boundaries clearly defined based on access pattern analysis ✅
-- [ ] Every access pattern solved or alternative provided ✅
-- [ ] Unnecessary cross-partition queries eliminated using identifying relationships ✅
-- [ ] All containers and indexes documented with full justification ✅
-- [ ] Hot partition analysis completed ✅
-- [ ] Cost estimates provided for high-volume operations ✅
-- [ ] Trade-offs explicitly documented and justified ✅
-- [ ] Global distribution strategy detailed ✅
-- [ ] Cross-referenced against `cosmosdb_requirements.md` for accuracy ✅
-```
-
-## Communication Guidelines
-
-🔴 CRITICAL BEHAVIORS:
-
-- NEVER fabricate RPS numbers - always work with user to estimate
-- NEVER reference other cloud providers' implementations
-- ALWAYS discuss major design decisions (denormalization, indexing strategies, aggregate boundaries) before implementing
-- ALWAYS update cosmosdb_requirements.md after each user response with new information
-- ALWAYS treat design considerations in modeling file as evolving thoughts, not final decisions
-- ALWAYS consider Multi-Document Containers when entities have 30-70% access correlation
-- ALWAYS consider Hierarchical Partition Keys as alternative to synthetic keys if initial design recommends synthetic keys
-- ALWAYS consider data binning for massive-scale workloads of uniform events and batch-type write workloads to optimize size and RU costs
-- **ALWAYS calculate costs accurately** - use realistic document sizes and include all overhead
-- **ALWAYS present final clean comparison** rather than multiple confusing iterations
-
-### Response Structure (Every Turn):
-
-1. What I learned: [summarize new information gathered]
-2. Updated in modeling file: [what sections were updated]
-3. Next steps: [what information still needed or what action planned]
-4. Questions: [limit to 3 focused questions]
-
-### Technical Communication:
-
-• Explain Cosmos DB concepts before using them
-• Use specific pattern numbers when referencing access patterns
-• Show RU calculations and distribution reasoning
-• Be conversational but precise with technical details
-
-🔴 File Creation Rules:
-
-• **Update cosmosdb_requirements.md**: After every user message with new info
-• **Create cosmosdb_data_model.md**: Only after user confirms all patterns captured AND validation checklist complete
-• **When creating final model**: Reason step-by-step, don't copy design considerations verbatim - re-evaluate everything
-
-🔴 **COST CALCULATION ACCURACY RULES**:
-• **Always calculate RU costs based on realistic document sizes** - not theoretical 1KB examples
-• **Include cross-partition overhead** in all cross-partition query costs (2.5 RU × physical partitions)
-• **Calculate physical partitions** using the total data size ÷ 50GB formula
-• **Provide monthly cost estimates** using 2,592,000 seconds/month and current RU pricing
-• **Compare total solution costs** when presenting multiple options
-• **Double-check all arithmetic** - RU calculation errors can lead to wrong recommendations
-
-## Important Azure Cosmos DB NoSQL Context
-
-### Understanding Aggregate-Oriented Design
-
-In aggregate-oriented design, Azure Cosmos DB NoSQL offers multiple levels of aggregation:
-
-1. Multi-Document Container Aggregates
-
- Multiple related entities grouped by sharing the same partition key but stored as separate documents with different IDs. This provides:
-
-   • Efficient querying of related data with a single SQL query
-   • Transactional consistency within the partition using stored procedures/triggers
-   • Flexibility to access individual documents
-   • No aggregate-wide size constraint (each individual document is limited to 2MB)
-
-2. Single Document Aggregates
-
- Multiple entities combined into a single Cosmos DB document. This provides:
-
-   • Atomic updates across all data in the aggregate
-   • Single point read retrieval for all data. Make sure to reference the document by id and partition key via the API (for example, `ReadItemAsync(id: "order0103", partitionKey: new PartitionKey("TimS1234"));` instead of a query like `SELECT * FROM c WHERE c.id = "order0103" AND c.partitionKey = "TimS1234"`; see the sketch below)
-   • Subject to the 2MB document size limit
-
-When designing aggregates, consider both levels based on your requirements.
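-
-A minimal sketch of the point-read guidance above, using the .NET SDK (`Microsoft.Azure.Cosmos`); the database, container, and `Order` type are illustrative:
-
-```csharp
-using System.Threading.Tasks;
-using Microsoft.Azure.Cosmos;
-
-// Hypothetical document shape for this sketch
-public record Order(string id, string partitionKey, decimal amount);
-
-public static class PointReadExample
-{
-    public static async Task<Order> GetOrderAsync(CosmosClient cosmosClient)
-    {
-        Container container = cosmosClient.GetContainer("appdb", "Orders");
-
-        // Point read by id + partition key: ~1 RU for a 1KB document,
-        // cheaper than running the equivalent SELECT query.
-        ItemResponse<Order> response = await container.ReadItemAsync<Order>(
-            id: "order0103",
-            partitionKey: new PartitionKey("TimS1234"));
-
-        return response.Resource;
-    }
-}
-```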
-
-### Constants for Reference
-
-• **Cosmos DB document limit**: 2MB (hard constraint)
-• **Autoscale mode**: Automatically scales between 10% and 100% of max RU/s
-• **Request Unit (RU) costs**:
-  • Point read (1KB document): 1 RU
-  • Query (1KB document): ~2-5 RUs depending on complexity
-  • Write (1KB document): ~5 RUs
-  • Update (1KB document): ~7 RUs (an update is more expensive than a create operation)
-  • Delete (1KB document): ~5 RUs
-  • **CRITICAL**: Large documents (>10KB) have proportionally higher RU costs
-  • **Cross-partition query overhead**: ~2.5 RU per physical partition scanned
-  • **Realistic RU estimation**: Always calculate based on actual document sizes, not theoretical 1KB
-• **Storage**: $0.25/GB-month
-• **Throughput**: $0.008/RU per hour (manual), $0.012/RU per hour (autoscale)
-• **Monthly seconds**: 2,592,000
-
-### Key Design Constraints
-
-• Document size limit: 2MB (hard limit affecting aggregate boundaries)
-• Partition throughput: Up to 10,000 RU/s per physical partition
-• Partition key cardinality: Aim for 100+ distinct values to avoid hot partitions (the higher the cardinality, the better)
-• **Physical partition math**: Total data size ÷ 50GB = number of physical partitions (see the worked example after this list)
-• Cross-partition queries: Higher RU cost and latency compared to single-partition queries; the RU cost per query increases with the number of physical partitions. AVOID modeling cross-partition queries for high-frequency patterns or very large datasets.
-• **Cross-partition overhead**: Each physical partition adds ~2.5 RU base cost to cross-partition queries
-• **Massive scale implications**: 100+ physical partitions make cross-partition queries extremely expensive and unscalable.
-• Index overhead: Every indexed property consumes storage and write RUs
-• Update patterns: Frequent updates to indexed properties or full document replaces increase RU costs (and the bigger the document, the bigger the RU increase on update)
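-
-A worked example of the partition math above (the data size and query rate are hypothetical): a 1TB container spans 1TB ÷ 50GB = 20 physical partitions, so each cross-partition query pays roughly 20 × 2.5 RU = 50 RU of fan-out overhead before any documents are read; at 100 such queries/sec that is 5,000 RU/s of pure overhead.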
-
-## Core Design Philosophy
-
-The core design philosophy is the default mode of thinking when getting started. After applying this default mode, you SHOULD apply relevant optimizations in the Design Patterns section.
-
-### Strategic Co-Location
-
-Use multi-document containers to group data together that is frequently accessed as long as it can be operationally coupled. Cosmos DB provides container-level features like throughput provisioning, indexing policies, and change feed that function at the container level. Grouping too much data together couples it operationally and can limit optimization opportunities.
-
-**Multi-Document Container Benefits:**
-
-- **Single query efficiency**: Retrieve related data in one SQL query instead of multiple round trips
-- **Cost optimization**: One query operation instead of multiple point reads
-- **Latency reduction**: Eliminate network overhead of multiple database calls
-- **Transactional consistency**: ACID transactions within the same partition
-- **Natural data locality**: Related data is physically stored together for optimal performance
-
-**When to Use Multi-Document Containers:**
-
-- User and their Orders: partition key = user_id, documents for user and orders
-- Product and its Reviews: partition key = product_id, documents for product and reviews
-- Course and its Lessons: partition key = course_id, documents for course and lessons
-- Team and its Members: partition key = team_id, documents for team and members
-
-#### Multi-Container vs Multi-Document Containers: The Right Balance
-
-While multi-document containers are powerful, don't force unrelated data together. Use multiple containers when entities have:
-
-**Different operational characteristics:**
-- Independent throughput requirements
-- Separate scaling patterns
-- Different indexing needs
-- Distinct change feed processing requirements
-
-**Operational Benefits of Multiple Containers:**
-
-- **Lower blast radius**: Container-level issues affect only related entities
-- **Granular throughput management**: Allocate RU/s independently per business domain
-- **Clear cost attribution**: Understand costs per business domain
-- **Clean change feeds**: Change feed contains logically related events
-- **Natural service boundaries**: Microservices can own domain-specific containers
-- **Simplified analytics**: Each container's change feed contains only one entity type
-
-#### Avoid Complex Single-Container Patterns
-
-Complex single-container design patterns that mix unrelated entities create operational overhead without meaningful benefits for most applications:
-
-**Single-container anti-patterns:**
-
-- Everything container → Complex filtering → Difficult analytics
-- One throughput allocation for everything
-- One change feed with mixed events requiring filtering
-- Scaling affects all entities
-- Complex indexing policies
-- Difficult to maintain and onboard new developers
-
-### Keep Relationships Simple and Explicit
-
-One-to-One: Store the related ID in both documents
-
-```json
-// Users container
-{ "id": "user_123", "partitionKey": "user_123", "profileId": "profile_456" }
-// Profiles container
-{ "id": "profile_456", "partitionKey": "profile_456", "userId": "user_123" }
-```
-
-One-to-Many: Use same partition key for parent-child relationship
-
-```json
-// Orders container with user_id as partition key
-{ "id": "order_789", "partitionKey": "user_123", "type": "order" }
-// Find orders for user: SELECT * FROM c WHERE c.partitionKey = "user_123" AND c.type = "order"
-```
-
-Many-to-Many: Use a separate relationship container
-
-```json
-// UserCourses container
-{ "id": "user_123_course_ABC", "partitionKey": "user_123", "userId": "user_123", "courseId": "ABC" }
-{ "id": "course_ABC_user_123", "partitionKey": "course_ABC", "userId": "user_123", "courseId": "ABC" }
-```
-
-Frequently accessed attributes: Denormalize sparingly
-
-```json
-// Orders document
-{
- "id": "order_789",
- "partitionKey": "user_123",
- "customerId": "user_123",
- "customerName": "John Doe" // Include customer name to avoid lookup
-}
-```
-
-These relationship patterns provide the initial foundation. Your specific access patterns should influence the implementation details within each container.
-
-### From Entity Containers to Aggregate-Oriented Design
-
-Starting with one container per entity is a good mental model, but your access patterns should drive how you optimize from there using aggregate-oriented design principles.
-
-Aggregate-oriented design recognizes that data is naturally accessed in groups (aggregates), and these access patterns should determine your container structure, not entity boundaries. Cosmos DB provides multiple levels of aggregation:
-
-1. Multi-Document Container Aggregates: Related entities share a partition key but remain separate documents
-2. Single Document Aggregates: Multiple entities combined into one document for atomic access
-
-The key insight: Let your access patterns reveal your natural aggregates, then design your containers around those aggregates rather than rigid entity structures.
-
-Reality check: If completing a user's primary workflow (like "browse products β add to cart β checkout") requires cross-partition queries across multiple containers, your entities might actually form aggregates that should be restructured together.
-
-### Aggregate Boundaries Based on Access Patterns
-
-When deciding aggregate boundaries, use this decision framework:
-
-Step 1: Analyze Access Correlation
-
-• 90% accessed together → Strong single document aggregate candidate
-• 50-90% accessed together → Multi-document container aggregate candidate
-• <50% accessed together → Separate aggregates/containers
-
-Step 2: Check Constraints
-
-• Size: Will combined size exceed 1MB? → Force multi-document or separate
-• Updates: Different update frequencies? → Consider multi-document
-• Atomicity: Need transactional updates? → Favor same partition
-
-Step 3: Choose Aggregate Type
-Based on Steps 1 & 2, select:
-
-• **Single Document Aggregate**: Embed everything in one document
-• **Multi-Document Container Aggregate**: Same partition key, different documents
-• **Separate Aggregates**: Different containers or different partition keys
-
-#### Example Aggregate Analysis
-
-Order + OrderItems:
-
-Access Analysis:
-• Fetch order without items: 5% (just checking status)
-• Fetch order with all items: 95% (normal flow)
-• Update patterns: Items rarely change independently
-• Combined size: ~50KB average, max 200KB
-
-Decision: Single Document Aggregate
-• partition key: order_id, id: order_id
-• OrderItems embedded as array property
-• Benefits: Atomic updates, single point read operation
-
-Product + Reviews:
-
-Access Analysis:
-• View product without reviews: 70%
-• View product with reviews: 30%
-• Update patterns: Reviews added independently
-• Size: Product 5KB, could have 1000s of reviews
-
-Decision: Multi-Document Container Aggregate
-• partition key: product_id, id: product_id (for product)
-• partition key: product_id, id: review_id (for each review)
-• Benefits: Flexible access, unbounded reviews, transactional consistency
-
-Customer + Orders:
-
-Access Analysis:
-• View customer profile only: 85%
-• View customer with order history: 15%
-• Update patterns: Completely independent
-• Size: Could have thousands of orders
-
-Decision: Separate Aggregates (different containers)
-• Customers container: partition key: customer_id
-• Orders container: partition key: order_id, with customer_id property
-• Benefits: Independent scaling, clear boundaries
-
-### Natural Keys Over Generic Identifiers
-
-Your keys should describe what they identify:
-• ✅ user_id, order_id, product_sku - Clear, purposeful
-• ❌ PK, SK, GSI1PK - Obscure, requires documentation
-• ✅ OrdersByCustomer, ProductsByCategory - Self-documenting queries
-• ❌ Query1, Query2 - Meaningless names
-
-This clarity becomes critical as your application grows and new developers join.
-
-### Optimize Indexing for Your Queries
-
-Index only properties your access patterns actually query, not everything convenient. Use selective indexing by excluding unused paths to reduce RU consumption and storage costs. Include composite indexes for complex ORDER BY and filter operations. Reality: Automatic indexing on all properties increases write RUs and storage costs regardless of usage. Validation: List specific properties each access pattern filters or sorts by. If most queries use only 2-3 properties, use selective indexing; if they use most properties, consider automatic indexing.
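-
-As an illustration, a hedged sketch of a selective indexing policy with a composite index, assuming a container whose queries filter on customerId and sort by orderDate (the property names are illustrative, not from a specific workload):
-
-```json
-{
-  "indexingPolicy": {
-    "automatic": true,
-    "includedPaths": [
-      { "path": "/customerId/?" },
-      { "path": "/orderDate/?" },
-      { "path": "/status/?" }
-    ],
-    "excludedPaths": [
-      { "path": "/*" }
-    ],
-    "compositeIndexes": [
-      [
-        { "path": "/customerId", "order": "ascending" },
-        { "path": "/orderDate", "order": "descending" }
-      ]
-    ]
-  }
-}
-```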
-
-### Design For Scale
-
-#### Partition Key Design
-
-Use the property you most frequently look up as your partition key (like user_id for user lookups). Simple selections sometimes create hot partitions through low variety or uneven access. Cosmos DB distributes load across partitions, but each logical partition has a 10,000 RU/s limit. Hot partitions overload single partitions with too many requests.
-
-Low cardinality creates hot partitions when partition keys have too few distinct values. subscription_tier (basic/premium/enterprise) creates only three partitions, forcing all traffic onto a few keys. Use high-cardinality keys like user_id or order_id.
-
-Popularity skew creates hot partitions when keys have variety but some values get dramatically more traffic. user_id provides millions of values, but popular users create hot partitions during viral moments with 10,000+ RU/s.
-
-Choose partition keys that distribute load evenly across many values while aligning with frequent lookups. Composite keys solve both problems by distributing load across partitions while maintaining query efficiency. device_id alone might overwhelm partitions, but device_id#hour spreads readings across time-based partitions.
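-
-For illustration, a minimal sketch of a telemetry document using such a synthetic device_id#hour key (the field names and values are assumptions):
-
-```json
-{
-  "id": "reading_001",
-  "partitionKey": "device_42#2024-07-09T14",
-  "deviceId": "device_42",
-  "recordedAt": "2024-07-09T14:05:32Z",
-  "temperature": 21.7
-}
-```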
-
-#### Consider the Index Overhead
-
-Index overhead increases RU costs and storage. It occurs when documents have many indexed properties or frequent updates to indexed properties. Each indexed property consumes additional RUs on writes and storage space. Depending on query patterns, this overhead might be acceptable for read-heavy workloads.
-
-🔴 IMPORTANT: If you're OK with the added costs, confirm that the increased RU consumption will not exceed your container's provisioned throughput. Do the back-of-the-envelope math to be safe.
-
-#### Workload-Driven Cost Optimization
-
-When making aggregate design decisions:
-
-• Calculate read cost = frequency × RUs per operation
-• Calculate write cost = frequency × RUs per operation
-• Total cost = Σ(read costs) + Σ(write costs)
-• Choose the design with lower total cost
-
-Example cost analysis:
-
-Option 1 - Denormalized Order+Customer:
-- Read cost: 1000 RPS × 1 RU = 1000 RU/s
-- Write cost: 50 order updates × 5 RU + 10 customer updates × 50 orders × 5 RU = 2750 RU/s
-- Total: 3750 RU/s
-
-Option 2 - Normalized with separate query:
-- Read cost: 1000 RPS × (1 RU + 3 RU) = 4000 RU/s
-- Write cost: 50 order updates × 5 RU + 10 customer updates × 5 RU = 300 RU/s
-- Total: 4300 RU/s
-
-Decision: Option 1 better for this case due to lower total RU consumption
-
-## Design Patterns
-
-This section includes common optimizations. None of these optimizations should be considered defaults. Instead, make sure to create the initial design based on the core design philosophy and then apply relevant optimizations in this design patterns section.
-
-### Massive Scale Data Binning Pattern
-
-🔴 **CRITICAL PATTERN** for extremely high-volume workloads (>50k writes/sec or >100M records):
-
-When facing massive write volumes, **data binning/chunking** can reduce write operations by 90%+ while maintaining query efficiency.
-
-**Problem**: 90M individual records × 80k writes/sec would require significant Cosmos DB partition and RU scale, which becomes cost-prohibitive.
-**Solution**: Group records into chunks (e.g., 100 records per document) to reduce per-document overhead and write RU costs, maintaining the same throughput and concurrency at much lower cost.
-**Result**: 90M records → 900k documents (99% reduction in document count)
-
-**Implementation**:
-```json
-{
- "id": "chunk_001",
- "partitionKey": "account_test_chunk_001",
- "chunkId": 1,
- "records": [
- { "recordId": 1, "data": "..." },
- { "recordId": 2, "data": "..." }
- // ... 98 more records
- ],
- "chunkSize": 100
-}
-```
-
-**When to Use**:
-- Write volumes >10k operations/sec
-- Individual records are small (<2KB each)
-- Records are often accessed in groups
-- Batch processing scenarios
-
-**Query Patterns**:
-- Single chunk: One point read returns all 100 records
-- Multiple chunks: `SELECT * FROM c WHERE STARTSWITH(c.partitionKey, "account_test_")`
-- RU efficiency: 43 RU per 150KB chunk vs 500 RU for 100 individual reads
-
-**Cost Benefits**:
-- 95%+ write RU reduction
-- Massive reduction in physical operations
-- Better partition distribution
-- Lower cross-partition query overhead
-
-### Multi-Entity Document Containers
-
-When multiple entity types are frequently accessed together, group them in the same container using different document types:
-
-**User + Recent Orders Example:**
-```json
-[
- {
- "id": "user_123",
- "partitionKey": "user_123",
- "type": "user",
- "name": "John Doe",
- "email": "john@example.com"
- },
- {
- "id": "order_456",
- "partitionKey": "user_123",
- "type": "order",
- "userId": "user_123",
- "amount": 99.99
- }
-]
-```
-
-**Query Patterns:**
-- Get user only: Point read with id="user_123", partitionKey="user_123"
-- Get user + recent orders: `SELECT * FROM c WHERE c.partitionKey = "user_123"`
-- Get specific order: Point read with id="order_456", partitionKey="user_123"
-
-**When to Use:**
-- 40-80% access correlation between entities
-- Entities have natural parent-child relationship
-- Acceptable operational coupling (throughput, indexing, change feed)
-- Combined entity queries stay under reasonable RU costs
-
-**Benefits:**
-- Single query retrieval for related data
-- Reduced latency and RU cost for joint access patterns
-- Transactional consistency within partition
-- Maintains entity normalization (no data duplication)
-
-**Trade-offs:**
-- Mixed entity types in change feed require filtering
-- Shared container throughput affects all entity types
-- Complex indexing policies for different document types
-
-### Refining Aggregate Boundaries
-
-After initial aggregate design, you may need to adjust boundaries based on deeper analysis:
-
-Promoting to Single Document Aggregate
-When multi-document analysis reveals:
-
-• Access correlation higher than initially thought (>90%)
-• All documents always fetched together
-• Combined size remains bounded
-• Would benefit from atomic updates
-
-Demoting to Multi-Document Container
-When single document analysis reveals:
-
-• Update amplification issues
-• Size growth concerns
-• Need to query subsets
-• Different indexing requirements
-
-Splitting Aggregates
-When cost analysis shows:
-
-• Index overhead exceeds read benefits
-• Hot partition risks from large aggregates
-• Need for independent scaling
-
-Example analysis:
-
-Product + Reviews Aggregate Analysis:
-- Access pattern: View product details (no reviews) - 70%
-- Access pattern: View product with reviews - 30%
-- Update frequency: Products daily, Reviews hourly
-- Average sizes: Product 5KB, Reviews 200KB total
-- Decision: Multi-document container - low access correlation + size concerns + update mismatch
-
-### Short-circuit denormalization
-
-Short-circuit denormalization involves duplicating a property from a related entity into the current entity to avoid an additional lookup during reads. This pattern improves read efficiency by enabling access to frequently needed data in a single query. Use this approach when:
-
-1. The access pattern requires an additional cross-partition query
-2. The duplicated property is mostly immutable or application can accept stale values
-3. The property is small enough and won't significantly impact RU consumption
-
-Example: In an e-commerce application, you can duplicate the ProductName from the Product document into each OrderItem document, so that fetching order items doesn't require additional queries to retrieve product names.
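-
-A minimal sketch of such an OrderItem document with the duplicated name (property names are illustrative):
-
-```json
-{
-  "id": "orderitem_001",
-  "partitionKey": "order_789",
-  "orderId": "order_789",
-  "productId": "product_123",
-  "productName": "Wireless Mouse", // Duplicated from the Product document
-  "quantity": 2
-}
-```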
-
-### Identifying relationship
-
-Identifying relationships enable you to eliminate cross-partition queries and reduce costs by using the parent_id as partition key. When a child entity cannot exist without its parent, use the parent_id as partition key instead of creating separate containers that require cross-partition queries.
-
-Standard Approach (More Expensive):
-
-• Child container: partition key = child_id
-• Cross-partition query needed: Query across partitions to find children by parent_id
-• Cost: Higher RU consumption for cross-partition queries
-
-Identifying Relationship Approach (Cost Optimized):
-
-• Child documents: partition key = parent_id, id = child_id
-• No cross-partition query needed: Query directly within parent partition
-• Cost savings: Significant RU reduction by avoiding cross-partition queries
-
-Use this approach when:
-
-1. The parent entity ID is always available when looking up child entities
-2. You need to query all child entities for a given parent ID
-3. Child entities are meaningless without their parent context
-
-Example: ProductReview container (a document sketch follows this list)
-
-• partition key = ProductId, id = ReviewId
-• Query all reviews for a product: `SELECT * FROM c WHERE c.partitionKey = "product123"`
-• Get specific review: Point read with partitionKey="product123" AND id="review456"
-• No cross-partition queries required, saving significant RU costs
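-
-A minimal sketch of a review document under this scheme (the values are illustrative):
-
-```json
-{
-  "id": "review456",
-  "partitionKey": "product123", // ProductId as partition key
-  "productId": "product123",
-  "rating": 5,
-  "title": "Works great"
-}
-```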
-
-### Hierarchical Access Patterns
-
-Composite partition keys are useful when data has a natural hierarchy and you need to query it at multiple levels. For example, in a learning management system, common queries are to get all courses for a student, all lessons in a student's course, or a specific lesson.
-
-StudentCourseLessons container:
-- Partition Key: student_id
-- Document types with hierarchical IDs:
-
-```json
-[
- {
- "id": "student_123",
- "partitionKey": "student_123",
- "type": "student"
- },
- {
- "id": "course_456",
- "partitionKey": "student_123",
- "type": "course",
- "courseId": "course_456"
- },
- {
- "id": "lesson_789",
- "partitionKey": "student_123",
- "type": "lesson",
- "courseId": "course_456",
- "lessonId": "lesson_789"
- }
-]
-```
-
-This enables:
-- Get all data: `SELECT * FROM c WHERE c.partitionKey = "student_123"`
-- Get course: `SELECT * FROM c WHERE c.partitionKey = "student_123" AND c.courseId = "course_456"`
-- Get lesson: Point read with partitionKey="student_123" AND id="lesson_789"
-
-### Access Patterns with Natural Boundaries
-
-Composite partition keys are useful to model natural query boundaries.
-
-TenantData container:
-- Partition Key: tenant_id + "_" + customer_id
-
-```json
-{
- "id": "record_123",
- "partitionKey": "tenant_456_customer_789",
- "tenantId": "tenant_456",
- "customerId": "customer_789"
-}
-```
-
-Natural because queries are always tenant-scoped and users never query across tenants.
-
-### Temporal Access Patterns
-
-Cosmos DB supports rich date/time operations in SQL queries. You can store temporal data using ISO 8601 strings or Unix timestamps. Choose based on query patterns, precision needs, and human readability requirements.
-
-Use ISO 8601 strings for:
-- Human-readable timestamps
-- Natural chronological sorting with ORDER BY
-- Business applications where readability matters
-- Built-in date functions like DateTimePart and DateTimeDiff
-
-Use numeric timestamps for:
-- Compact storage
-- Mathematical operations on time values
-- High precision requirements
-
-Create composite indexes with datetime properties to efficiently query temporal data while maintaining chronological ordering.
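-
-A minimal sketch of such a composite index, assuming documents carry a /userId and an ISO 8601 /createdAt property (both names are illustrative):
-
-```json
-{
-  "indexingPolicy": {
-    "compositeIndexes": [
-      [
-        { "path": "/userId", "order": "ascending" },
-        { "path": "/createdAt", "order": "descending" }
-      ]
-    ]
-  }
-}
-```
-
-This supports queries like `SELECT * FROM c WHERE c.userId = "user_123" ORDER BY c.createdAt DESC` without a separate sort step.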
-
-### Optimizing Queries with Sparse Indexes
-
-Cosmos DB automatically indexes all properties, but you can create sparse patterns with selective indexing policies. When only a minority of documents carry a queried property, excluding the paths that don't need indexing reduces storage and write RU costs while improving query performance.
-
-Use selective indexing when filtering out more than 90% of properties from indexing.
-
-Example: Products container where only sale items need sale_price indexed
-
-```json
-{
- "indexingPolicy": {
- "includedPaths": [
- { "path": "/name/*" },
- { "path": "/category/*" },
- { "path": "/sale_price/*" }
- ],
- "excludedPaths": [
- { "path": "/*" }
- ]
- }
-}
-```
-
-This reduces indexing overhead for properties that are rarely queried.
-
-### Access Patterns with Unique Constraints
-
-Azure Cosmos DB enforces uniqueness only on the id + partition key combination (optional unique key policies exist, but they are scoped to a logical partition and fixed at container creation). For other unique attributes, implement application-level uniqueness using conditional operations or stored procedures within transactions.
-
-```javascript
-// Stored procedure for creating user with unique email
-function createUserWithUniqueEmail(userData) {
-    var context = getContext();
-    var container = context.getCollection();
-
-    // Check whether the email already exists (parameterized to avoid query injection)
-    var query = {
-        query: 'SELECT * FROM c WHERE c.email = @email',
-        parameters: [{ name: '@email', value: userData.email }]
-    };
-
-    var queryAccepted = container.queryDocuments(
-        container.getSelfLink(),
-        query,
-        function (err, documents) {
-            if (err) throw new Error('Error querying documents: ' + err.message);
-
-            if (documents.length > 0) {
-                throw new Error('Email already exists');
-            }
-
-            // Email is unique, create the user
-            var createAccepted = container.createDocument(
-                container.getSelfLink(),
-                userData,
-                function (err, document) {
-                    if (err) throw new Error('Error creating document: ' + err.message);
-                    context.getResponse().setBody(document);
-                }
-            );
-
-            if (!createAccepted) throw new Error('The create was not accepted by the server.');
-        }
-    );
-
-    if (!queryAccepted) throw new Error('The query was not accepted by the server.');
-}
-```
-
-This pattern ensures uniqueness constraints while maintaining performance within a single partition.
-
-### Hierarchical Partition Keys (HPK) for Natural Query Boundaries
-
-🔴 **NEW FEATURE** - Available in dedicated Cosmos DB NoSQL API only:
-
-Hierarchical Partition Keys provide natural query boundaries using multiple fields as partition key levels, eliminating synthetic key complexity while optimizing query performance.
-
-**Standard Partition Key**:
-```json
-{
- "partitionKey": "account_123_test_456_chunk_001" // Synthetic composite
-}
-```
-
-**Hierarchical Partition Key**:
-```json
-{
- "partitionKey": {
- "version": 2,
- "kind": "MultiHash",
- "paths": ["/accountId", "/testId", "/chunkId"]
- }
-}
-```
-
-**Query Benefits**:
-- Single partition queries: `WHERE accountId = "123" AND testId = "456"`
-- Prefix queries: `WHERE accountId = "123"` (efficient cross-partition)
-- Natural hierarchy eliminates synthetic key logic
-
-**When to Consider HPK**:
-- Data has natural hierarchy (tenant → user → document)
-- Frequent prefix-based queries
-- Want to eliminate synthetic partition key complexity
-- Apply only for Cosmos NoSQL API
-
-**Trade-offs**:
-- Requires dedicated tier (not available on serverless)
-- Newer feature with less production history
-- Query patterns must align with hierarchy levels
-
-### Handling High-Write Workloads with Write Sharding
-
-Write sharding distributes high-volume write operations across multiple partition keys to overcome Cosmos DB's per-partition RU limits. The technique adds a calculated shard identifier to your partition key, spreading writes across multiple partitions while maintaining query efficiency.
-
-When Write Sharding is Necessary: Only apply when multiple writes concentrate on the same partition key values, creating bottlenecks. Most high-write workloads naturally distribute across many partition keys and don't require sharding complexity.
-
-Implementation: Add a shard suffix using hash-based or time-based calculation:
-
-```javascript
-// Hash-based sharding: derive a stable shard from an identifier
-function hashShardKey(originalKey, identifier, shardCount) {
-  let hash = 0;
-  for (const ch of identifier) hash = (hash * 31 + ch.charCodeAt(0)) | 0;
-  return `${originalKey}_${Math.abs(hash) % shardCount}`;
-}
-
-// Time-based sharding: rotate shards by the current hour
-function timeShardKey(originalKey, shardCount) {
-  return `${originalKey}_${new Date().getUTCHours() % shardCount}`;
-}
-```
-
-Query Impact: Sharded data requires querying all shards and merging results in your application, trading query complexity for write scalability.
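-
-As a sketch of that fan-out-and-merge step, assuming the JavaScript SDK (`@azure/cosmos`); the helper and the shard-key format are illustrative:
-
-```javascript
-// Query every shard of one logical key in parallel and merge the results
-async function readAllShards(container, originalKey, shardCount) {
-  const reads = [];
-  for (let shard = 0; shard < shardCount; shard++) {
-    reads.push(container.items
-      .query({
-        query: "SELECT * FROM c WHERE c.partitionKey = @pk",
-        parameters: [{ name: "@pk", value: `${originalKey}_${shard}` }]
-      })
-      .fetchAll());
-  }
-  const responses = await Promise.all(reads);
-  return responses.flatMap(r => r.resources);
-}
-```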
-
-#### Sharding Concentrated Writes
-
-Apply sharding when specific entities receive disproportionate write activity, such as viral social media posts that receive thousands of interactions per second while typical posts see only occasional activity.
-
-PostInteractions container (problematic):
-• Partition Key: post_id
-• Problem: Viral posts exceed 10,000 RU/s per partition limit
-• Result: Request rate throttling during high engagement
-
-Sharded solution:
-• Partition Key: post_id + "_" + shard_id (e.g., "post123_7")
-• Shard calculation: shard_id = hash(user_id) % 20
-• Result: Distributes interactions across 20 partitions per post
-
-#### Sharding Monotonically Increasing Keys
-
-Sequential writes like timestamps or auto-incrementing IDs concentrate on recent values, creating hot spots on the latest partition.
-
-EventLog container (problematic):
-• Partition Key: date (YYYY-MM-DD format)
-• Problem: All today's events write to same date partition
-• Result: Limited to 10,000 RU/s regardless of total container throughput
-
-Sharded solution:
-• Partition Key: date + "_" + shard_id (e.g., "2024-07-09_4")
-• Shard calculation: shard_id = hash(event_id) % 15
-• Result: Distributes daily events across 15 partitions
-
-### Aggregate Boundaries and Update Patterns
-
-When aggregate boundaries conflict with update patterns, prioritize based on RU cost impact:
-
-Example: Order Processing System
-β’ Read pattern: Always fetch order with all items (1000 RPS)
-β’ Update pattern: Individual item status updates (100 RPS)
-
-Option 1 - Combined aggregate (single document):
-- Read cost: 1000 RPS × 1 RU = 1000 RU/s
-- Write cost: 100 RPS × 10 RU (rewrite entire order) = 1000 RU/s
-
-Option 2 - Separate items (multi-document):
-- Read cost: 1000 RPS × 5 RU (query multiple items) = 5000 RU/s
-- Write cost: 100 RPS × 10 RU (update single item) = 1000 RU/s
-
-Decision: Option 1 better due to significantly lower read costs despite same write costs
-
-### Modeling Transient Data with TTL
-
-TTL cost-effectively manages transient data with natural expiration times. Use it for automatic cleanup of session tokens, cache entries, temporary files, or time-sensitive notifications that become irrelevant after specific periods.
-
-TTL in Cosmos DB removes expired documents automatically: expired items immediately stop appearing in query results, and a background process deletes them using spare throughput. Use TTL for both security-sensitive and cleanup scenarios. You can update or delete documents before TTL expires them; updating a document resets its expiration window, since TTL is measured from the document's last modified time.
-
-The `ttl` property is an integer number of seconds relative to the document's last modified time (`_ts`), not an absolute timestamp.
-
-Example: Session tokens with 24-hour expiration
-
-```json
-{
- "id": "sess_abc123",
- "partitionKey": "user_456",
- "userId": "user_456",
- "createdAt": "2024-01-01T12:00:00Z",
- "ttl": 86400
-}
-```
-
-Container-level TTL configuration:
-```json
-{
- "defaultTtl": -1, // Enable TTL, no default expiration
-}
-```
-
-The `ttl` property on individual documents overrides the container default, providing flexible expiration policies per document type.
diff --git a/prompts/create-agentsmd.prompt.md b/prompts/create-agentsmd.prompt.md
deleted file mode 100644
index 1c3e812c..00000000
--- a/prompts/create-agentsmd.prompt.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-description: "Prompt for generating an AGENTS.md file for a repository"
-agent: "agent"
----
-
-# Create high-quality AGENTS.md file
-
-You are a code agent. Your task is to create a complete, accurate AGENTS.md at the root of this repository that follows the public guidance at https://agents.md/.
-
-AGENTS.md is an open format designed to provide coding agents with the context and instructions they need to work effectively on a project.
-
-## What is AGENTS.md?
-
-AGENTS.md is a Markdown file that serves as a "README for agents" - a dedicated, predictable place to provide context and instructions to help AI coding agents work on your project. It complements README.md by containing detailed technical context that coding agents need but might clutter a human-focused README.
-
-## Key Principles
-
-- **Agent-focused**: Contains detailed technical instructions for automated tools
-- **Complements README.md**: Doesn't replace human documentation but adds agent-specific context
-- **Standardized location**: Placed at repository root (or subproject roots for monorepos)
-- **Open format**: Uses standard Markdown with flexible structure
-- **Ecosystem compatibility**: Works across 20+ different AI coding tools and agents
-
-## File Structure and Content Guidelines
-
-### 1. Required Setup
-
-- Create the file as `AGENTS.md` in the repository root
-- Use standard Markdown formatting
-- No required fields - flexible structure based on project needs
-
-### 2. Essential Sections to Include
-
-#### Project Overview
-
-- Brief description of what the project does
-- Architecture overview if complex
-- Key technologies and frameworks used
-
-#### Setup Commands
-
-- Installation instructions
-- Environment setup steps
-- Dependency management commands
-- Database setup if applicable
-
-#### Development Workflow
-
-- How to start development server
-- Build commands
-- Watch/hot-reload setup
-- Package manager specifics (npm, pnpm, yarn, etc.)
-
-#### Testing Instructions
-
-- How to run tests (unit, integration, e2e)
-- Test file locations and naming conventions
-- Coverage requirements
-- Specific test patterns or frameworks used
-- How to run subset of tests or focus on specific areas
-
-#### Code Style Guidelines
-
-- Language-specific conventions
-- Linting and formatting rules
-- File organization patterns
-- Naming conventions
-- Import/export patterns
-
-#### Build and Deployment
-
-- Build commands and outputs
-- Environment configurations
-- Deployment steps and requirements
-- CI/CD pipeline information
-
-### 3. Optional but Recommended Sections
-
-#### Security Considerations
-
-- Security testing requirements
-- Secrets management
-- Authentication patterns
-- Permission models
-
-#### Monorepo Instructions (if applicable)
-
-- How to work with multiple packages
-- Cross-package dependencies
-- Selective building/testing
-- Package-specific commands
-
-#### Pull Request Guidelines
-
-- Title format requirements
-- Required checks before submission
-- Review process
-- Commit message conventions
-
-#### Debugging and Troubleshooting
-
-- Common issues and solutions
-- Logging patterns
-- Debug configuration
-- Performance considerations
-
-## Example Template
-
-Use this as a starting template and customize based on the specific project:
-
-```markdown
-# AGENTS.md
-
-## Project Overview
-
-[Brief description of the project, its purpose, and key technologies]
-
-## Setup Commands
-
-- Install dependencies: `[package manager] install`
-- Start development server: `[command]`
-- Build for production: `[command]`
-
-## Development Workflow
-
-- [Development server startup instructions]
-- [Hot reload/watch mode information]
-- [Environment variable setup]
-
-## Testing Instructions
-
-- Run all tests: `[command]`
-- Run unit tests: `[command]`
-- Run integration tests: `[command]`
-- Test coverage: `[command]`
-- [Specific testing patterns or requirements]
-
-## Code Style
-
-- [Language and framework conventions]
-- [Linting rules and commands]
-- [Formatting requirements]
-- [File organization patterns]
-
-## Build and Deployment
-
-- [Build process details]
-- [Output directories]
-- [Environment-specific builds]
-- [Deployment commands]
-
-## Pull Request Guidelines
-
-- Title format: [component] Brief description
-- Required checks: `[lint command]`, `[test command]`
-- [Review requirements]
-
-## Additional Notes
-
-- [Any project-specific context]
-- [Common gotchas or troubleshooting tips]
-- [Performance considerations]
-```
-
-## Working Example from agents.md
-
-Here's a real example from the agents.md website:
-
-```markdown
-# Sample AGENTS.md file
-
-## Dev environment tips
-
-- Use `pnpm dlx turbo run where <project_name>` to jump to a package instead of scanning with `ls`.
-- Run `pnpm install --filter <project_name>` to add the package to your workspace so Vite, ESLint, and TypeScript can see it.
-- Use `pnpm create vite@latest <project_name> -- --template react-ts` to spin up a new React + Vite package with TypeScript checks ready.
-- Check the name field inside each package's package.json to confirm the right name; skip the top-level one.
-
-## Testing instructions
-
-- Find the CI plan in the .github/workflows folder.
-- Run `pnpm turbo run test --filter <project_name>` to run every check defined for that package.
-- From the package root you can just call `pnpm test`. The commit should pass all tests before you merge.
-- To focus on one step, add the Vitest pattern: `pnpm vitest run -t "<test name>"`.
-- Fix any test or type errors until the whole suite is green.
-- After moving files or changing imports, run `pnpm lint --filter <project_name>` to be sure ESLint and TypeScript rules still pass.
-- Add or update tests for the code you change, even if nobody asked.
-
-## PR instructions
-
-- Title format: [<project_name>] <Title>
-- Always run `pnpm lint` and `pnpm test` before committing.
-```
-
-## Implementation Steps
-
-1. **Analyze the project structure** to understand:
-
- - Programming languages and frameworks used
- - Package managers and build tools
- - Testing frameworks
- - Project architecture (monorepo, single package, etc.)
-
-2. **Identify key workflows** by examining:
-
- - package.json scripts
- - Makefile or other build files
- - CI/CD configuration files
- - Documentation files
-
-3. **Create comprehensive sections** covering:
-
- - All essential setup and development commands
- - Testing strategies and commands
- - Code style and conventions
- - Build and deployment processes
-
-4. **Include specific, actionable commands** that agents can execute directly
-
-5. **Test the instructions** by ensuring all commands work as documented
-
-6. **Keep it focused** on what agents need to know, not general project information
-
-## Best Practices
-
-- **Be specific**: Include exact commands, not vague descriptions
-- **Use code blocks**: Wrap commands in backticks for clarity
-- **Include context**: Explain why certain steps are needed
-- **Stay current**: Update as the project evolves
-- **Test commands**: Ensure all listed commands actually work
-- **Consider nested files**: For monorepos, create AGENTS.md files in subprojects as needed
-
-## Monorepo Considerations
-
-For large monorepos:
-
-- Place a main AGENTS.md at the repository root
-- Create additional AGENTS.md files in subproject directories
-- The closest AGENTS.md file takes precedence for any given location
-- Include navigation tips between packages/projects
-
-## Final Notes
-
-- AGENTS.md works with 20+ AI coding tools including Cursor, Aider, Gemini CLI, and many others
-- The format is intentionally flexible - adapt it to your project's needs
-- Focus on actionable instructions that help agents understand and work with your codebase
-- This is living documentation - update it as your project evolves
-
-When creating the AGENTS.md file, prioritize clarity, completeness, and actionability. The goal is to give any coding agent enough context to effectively contribute to the project without requiring additional human guidance.
diff --git a/prompts/create-architectural-decision-record.prompt.md b/prompts/create-architectural-decision-record.prompt.md
deleted file mode 100644
index 5b1840b8..00000000
--- a/prompts/create-architectural-decision-record.prompt.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-agent: 'agent'
-description: 'Create an Architectural Decision Record (ADR) document for AI-optimized decision documentation.'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'githubRepo', 'openSimpleBrowser', 'problems', 'runTasks', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI']
----
-# Create Architectural Decision Record
-
-Create an ADR document for `${input:DecisionTitle}` using structured formatting optimized for AI consumption and human readability.
-
-## Inputs
-
-- **Context**: `${input:Context}`
-- **Decision**: `${input:Decision}`
-- **Alternatives**: `${input:Alternatives}`
-- **Stakeholders**: `${input:Stakeholders}`
-
-## Input Validation
-If any of the required inputs are not provided or cannot be determined from the conversation history, ask the user to provide the missing information before proceeding with ADR generation.
-
-## Requirements
-
-- Use precise, unambiguous language
-- Follow standardized ADR format with front matter
-- Include both positive and negative consequences
-- Document alternatives with rejection rationale
-- Structure for machine parsing and human reference
-- Use coded bullet points (3-4 letter codes + 3-digit numbers) for multi-item sections
-
-The ADR must be saved in the `/docs/adr/` directory using the naming convention: `adr-NNNN-[title-slug].md`, where NNNN is the next sequential 4-digit number (e.g., `adr-0001-database-selection.md`).
-
-## Required Documentation Structure
-
-The documentation file must follow the template below, ensuring that all sections are filled out appropriately. The front matter for the markdown should be structured correctly as per the example following:
-
-```md
----
-title: "ADR-NNNN: [Decision Title]"
-status: "Proposed"
-date: "YYYY-MM-DD"
-authors: "[Stakeholder Names/Roles]"
-tags: ["architecture", "decision"]
-supersedes: ""
-superseded_by: ""
----
-
-# ADR-NNNN: [Decision Title]
-
-## Status
-
-**Proposed** | Accepted | Rejected | Superseded | Deprecated
-
-## Context
-
-[Problem statement, technical constraints, business requirements, and environmental factors requiring this decision.]
-
-## Decision
-
-[Chosen solution with clear rationale for selection.]
-
-## Consequences
-
-### Positive
-
-- **POS-001**: [Beneficial outcomes and advantages]
-- **POS-002**: [Performance, maintainability, scalability improvements]
-- **POS-003**: [Alignment with architectural principles]
-
-### Negative
-
-- **NEG-001**: [Trade-offs, limitations, drawbacks]
-- **NEG-002**: [Technical debt or complexity introduced]
-- **NEG-003**: [Risks and future challenges]
-
-## Alternatives Considered
-
-### [Alternative 1 Name]
-
-- **ALT-001**: **Description**: [Brief technical description]
-- **ALT-002**: **Rejection Reason**: [Why this option was not selected]
-
-### [Alternative 2 Name]
-
-- **ALT-003**: **Description**: [Brief technical description]
-- **ALT-004**: **Rejection Reason**: [Why this option was not selected]
-
-## Implementation Notes
-
-- **IMP-001**: [Key implementation considerations]
-- **IMP-002**: [Migration or rollout strategy if applicable]
-- **IMP-003**: [Monitoring and success criteria]
-
-## References
-
-- **REF-001**: [Related ADRs]
-- **REF-002**: [External documentation]
-- **REF-003**: [Standards or frameworks referenced]
-```
diff --git a/prompts/create-github-action-workflow-specification.prompt.md b/prompts/create-github-action-workflow-specification.prompt.md
deleted file mode 100644
index 9979f4e5..00000000
--- a/prompts/create-github-action-workflow-specification.prompt.md
+++ /dev/null
@@ -1,276 +0,0 @@
----
-agent: 'agent'
-description: 'Create a formal specification for an existing GitHub Actions CI/CD workflow, optimized for AI consumption and workflow maintenance.'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'findTestFiles', 'githubRepo', 'new', 'openSimpleBrowser', 'problems', 'runCommands', 'runInTerminal2', 'runNotebooks', 'runTasks', 'runTests', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI', 'microsoft.docs.mcp', 'github', 'Microsoft Docs']
----
-# Create GitHub Actions Workflow Specification
-
-Create a comprehensive specification for the GitHub Actions workflow: `${input:WorkflowFile}`.
-
-This document serves as the formal specification for the workflow's behavior, requirements, and constraints. It must be implementation-agnostic, focusing on **what** the workflow accomplishes rather than **how** it's implemented.
-
-## AI-Optimized Requirements
-
-- **Token Efficiency**: Use concise language without sacrificing clarity
-- **Structured Data**: Leverage tables, lists, and diagrams for dense information
-- **Semantic Clarity**: Use precise terminology consistently throughout
-- **Implementation Abstraction**: Avoid specific syntax, commands, or tool versions
-- **Maintainability**: Design for easy updates as workflow evolves
-
-## Specification Template
-
-Save as: `/spec/spec-process-cicd-[workflow-name].md`
-
-```md
----
-title: CI/CD Workflow Specification - [Workflow Name]
-version: 1.0
-date_created: [YYYY-MM-DD]
-last_updated: [YYYY-MM-DD]
-owner: DevOps Team
-tags: [process, cicd, github-actions, automation, [domain-specific-tags]]
----
-
-## Workflow Overview
-
-**Purpose**: [One sentence describing workflow's primary goal]
-**Trigger Events**: [List trigger conditions]
-**Target Environments**: [Environment scope]
-
-## Execution Flow Diagram
-
-```mermaid
-graph TD
- A[Trigger Event] --> B[Job 1]
- B --> C[Job 2]
- C --> D[Job 3]
- D --> E[End]
-
- B --> F[Parallel Job]
- F --> D
-
- style A fill:#e1f5fe
- style E fill:#e8f5e8
-```
-
-## Jobs & Dependencies
-
-| Job Name | Purpose | Dependencies | Execution Context |
-|----------|---------|--------------|-------------------|
-| job-1 | [Purpose] | [Prerequisites] | [Runner/Environment] |
-| job-2 | [Purpose] | job-1 | [Runner/Environment] |
-
-## Requirements Matrix
-
-### Functional Requirements
-| ID | Requirement | Priority | Acceptance Criteria |
-|----|-------------|----------|-------------------|
-| REQ-001 | [Requirement] | High | [Testable criteria] |
-| REQ-002 | [Requirement] | Medium | [Testable criteria] |
-
-### Security Requirements
-| ID | Requirement | Implementation Constraint |
-|----|-------------|---------------------------|
-| SEC-001 | [Security requirement] | [Constraint description] |
-
-### Performance Requirements
-| ID | Metric | Target | Measurement Method |
-|----|-------|--------|-------------------|
-| PERF-001 | [Metric] | [Target value] | [How measured] |
-
-## Input/Output Contracts
-
-### Inputs
-
-```yaml
-# Environment Variables
-ENV_VAR_1: string # Purpose: [description]
-ENV_VAR_2: secret # Purpose: [description]
-
-# Repository Triggers
-paths: [list of path filters]
-branches: [list of branch patterns]
-```
-
-### Outputs
-
-```yaml
-# Job Outputs
-job_1_output: string # Description: [purpose]
-build_artifact: file # Description: [content type]
-```
-
-### Secrets & Variables
-
-| Type | Name | Purpose | Scope |
-|------|------|---------|-------|
-| Secret | SECRET_1 | [Purpose] | Workflow |
-| Variable | VAR_1 | [Purpose] | Repository |
-
-## Execution Constraints
-
-### Runtime Constraints
-
-- **Timeout**: [Maximum execution time]
-- **Concurrency**: [Parallel execution limits]
-- **Resource Limits**: [Memory/CPU constraints]
-
-### Environmental Constraints
-
-- **Runner Requirements**: [OS/hardware needs]
-- **Network Access**: [External connectivity needs]
-- **Permissions**: [Required access levels]
-
-## Error Handling Strategy
-
-| Error Type | Response | Recovery Action |
-|------------|----------|-----------------|
-| Build Failure | [Response] | [Recovery steps] |
-| Test Failure | [Response] | [Recovery steps] |
-| Deployment Failure | [Response] | [Recovery steps] |
-
-## Quality Gates
-
-### Gate Definitions
-
-| Gate | Criteria | Bypass Conditions |
-|------|----------|-------------------|
-| Code Quality | [Standards] | [When allowed] |
-| Security Scan | [Thresholds] | [When allowed] |
-| Test Coverage | [Percentage] | [When allowed] |
-
-## Monitoring & Observability
-
-### Key Metrics
-
-- **Success Rate**: [Target percentage]
-- **Execution Time**: [Target duration]
-- **Resource Usage**: [Monitoring approach]
-
-### Alerting
-
-| Condition | Severity | Notification Target |
-|-----------|----------|-------------------|
-| [Condition] | [Level] | [Who/Where] |
-
-## Integration Points
-
-### External Systems
-
-| System | Integration Type | Data Exchange | SLA Requirements |
-|--------|------------------|---------------|------------------|
-| [System] | [Type] | [Data format] | [Requirements] |
-
-### Dependent Workflows
-
-| Workflow | Relationship | Trigger Mechanism |
-|----------|--------------|-------------------|
-| [Workflow] | [Type] | [How triggered] |
-
-## Compliance & Governance
-
-### Audit Requirements
-
-- **Execution Logs**: [Retention policy]
-- **Approval Gates**: [Required approvals]
-- **Change Control**: [Update process]
-
-### Security Controls
-
-- **Access Control**: [Permission model]
-- **Secret Management**: [Rotation policy]
-- **Vulnerability Scanning**: [Scan frequency]
-
-## Edge Cases & Exceptions
-
-### Scenario Matrix
-
-| Scenario | Expected Behavior | Validation Method |
-|----------|-------------------|-------------------|
-| [Edge case] | [Behavior] | [How to verify] |
-
-## Validation Criteria
-
-### Workflow Validation
-
-- **VLD-001**: [Validation rule]
-- **VLD-002**: [Validation rule]
-
-### Performance Benchmarks
-
-- **PERF-001**: [Benchmark criteria]
-- **PERF-002**: [Benchmark criteria]
-
-## Change Management
-
-### Update Process
-
-1. **Specification Update**: Modify this document first
-2. **Review & Approval**: [Approval process]
-3. **Implementation**: Apply changes to workflow
-4. **Testing**: [Validation approach]
-5. **Deployment**: [Release process]
-
-### Version History
-
-| Version | Date | Changes | Author |
-|---------|------|---------|--------|
-| 1.0 | [Date] | Initial specification | [Author] |
-
-## Related Specifications
-
-- [Link to related workflow specs]
-- [Link to infrastructure specs]
-- [Link to deployment specs]
-
-```
-
-## Analysis Instructions
-
-When analyzing the workflow file:
-
-1. **Extract Core Purpose**: Identify the primary business objective
-2. **Map Job Flow**: Create dependency graph showing execution order
-3. **Identify Contracts**: Document inputs, outputs, and interfaces
-4. **Capture Constraints**: Extract timeouts, permissions, and limits
-5. **Define Quality Gates**: Identify validation and approval points
-6. **Document Error Paths**: Map failure scenarios and recovery
-7. **Abstract Implementation**: Focus on behavior, not syntax
-
-## Mermaid Diagram Guidelines
-
-### Flow Types
-- **Sequential**: `A --> B --> C`
-- **Parallel**: `A --> B & A --> C; B --> D & C --> D`
-- **Conditional**: `A --> B{Decision}; B -->|Yes| C; B -->|No| D`
-
-### Styling
-```mermaid
-style TriggerNode fill:#e1f5fe
-style SuccessNode fill:#e8f5e8
-style FailureNode fill:#ffebee
-style ProcessNode fill:#f3e5f5
-```
-
-### Complex Workflows
-For workflows with 5+ jobs, use subgraphs:
-```mermaid
-graph TD
- subgraph "Build Phase"
- A[Lint] --> B[Test] --> C[Build]
- end
- subgraph "Deploy Phase"
- D[Staging] --> E[Production]
- end
- C --> D
-```
-
-## Token Optimization Strategies
-
-1. **Use Tables**: Dense information in structured format
-2. **Abbreviate Consistently**: Define once, use throughout
-3. **Bullet Points**: Avoid prose paragraphs
-4. **Code Blocks**: Structured data over narrative
-5. **Cross-Reference**: Link instead of repeat information
-
-Focus on creating a specification that serves as both documentation and a template for workflow updates.
diff --git a/prompts/create-github-issue-feature-from-specification.prompt.md b/prompts/create-github-issue-feature-from-specification.prompt.md
deleted file mode 100644
index f5d7631a..00000000
--- a/prompts/create-github-issue-feature-from-specification.prompt.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-agent: 'agent'
-description: 'Create GitHub Issue for feature request from specification file using feature_request.yml template.'
-tools: ['search/codebase', 'search', 'github', 'create_issue', 'search_issues', 'update_issue']
----
-# Create GitHub Issue from Specification
-
-Create GitHub Issue for the specification at `${file}`.
-
-## Process
-
-1. Analyze specification file to extract requirements
-2. Check existing issues using `search_issues`
-3. Create new issue using `create_issue` or update existing with `update_issue`
-4. Use `feature_request.yml` template (fallback to default)
-
-## Requirements
-
-- Single issue for the complete specification
-- Clear title identifying the specification
-- Include only changes required by the specification
-- Verify against existing issues before creation
-
-## Issue Content
-
-- Title: Feature name from specification
-- Description: Problem statement, proposed solution, and context
-- Labels: feature, enhancement (as appropriate)
diff --git a/prompts/create-github-issues-feature-from-implementation-plan.prompt.md b/prompts/create-github-issues-feature-from-implementation-plan.prompt.md
deleted file mode 100644
index 2c68b226..00000000
--- a/prompts/create-github-issues-feature-from-implementation-plan.prompt.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-agent: 'agent'
-description: 'Create GitHub Issues from implementation plan phases using feature_request.yml or chore_request.yml templates.'
-tools: ['search/codebase', 'search', 'github', 'create_issue', 'search_issues', 'update_issue']
----
-# Create GitHub Issue from Implementation Plan
-
-Create GitHub Issues for the implementation plan at `${file}`.
-
-## Process
-
-1. Analyze plan file to identify phases
-2. Check existing issues using `search_issues`
-3. Create new issue per phase using `create_issue` or update existing with `update_issue`
-4. Use `feature_request.yml` or `chore_request.yml` templates (fallback to default)
-
-## Requirements
-
-- One issue per implementation phase
-- Clear, structured titles and descriptions
-- Include only changes required by the plan
-- Verify against existing issues before creation
-
-## Issue Content
-
-- Title: Phase name from implementation plan
-- Description: Phase details, requirements, and context
-- Labels: Appropriate for issue type (feature/chore)
diff --git a/prompts/create-github-issues-for-unmet-specification-requirements.prompt.md b/prompts/create-github-issues-for-unmet-specification-requirements.prompt.md
deleted file mode 100644
index 02a9e8aa..00000000
--- a/prompts/create-github-issues-for-unmet-specification-requirements.prompt.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-agent: 'agent'
-description: 'Create GitHub Issues for unimplemented requirements from specification files using feature_request.yml template.'
-tools: ['search/codebase', 'search', 'github', 'create_issue', 'search_issues', 'update_issue']
----
-# Create GitHub Issues for Unmet Specification Requirements
-
-Create GitHub Issues for unimplemented requirements in the specification at `${file}`.
-
-## Process
-
-1. Analyze specification file to extract all requirements
-2. Check codebase implementation status for each requirement
-3. Search existing issues using `search_issues` to avoid duplicates
-4. Create new issue per unimplemented requirement using `create_issue`
-5. Use `feature_request.yml` template (fallback to default)
-
-## Requirements
-
-- One issue per unimplemented requirement from specification
-- Clear requirement ID and description mapping
-- Include implementation guidance and acceptance criteria
-- Verify against existing issues before creation
-
-## Issue Content
-
-- Title: Requirement ID and brief description
-- Description: Detailed requirement, implementation method, and context
-- Labels: feature, enhancement (as appropriate)
-
-## Implementation Check
-
-- Search codebase for related code patterns
-- Check related specification files in `/spec/` directory
-- Verify requirement isn't partially implemented
diff --git a/prompts/create-github-pull-request-from-specification.prompt.md b/prompts/create-github-pull-request-from-specification.prompt.md
deleted file mode 100644
index 4eb780d2..00000000
--- a/prompts/create-github-pull-request-from-specification.prompt.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-agent: 'agent'
-description: 'Create GitHub Pull Request for feature request from specification file using pull_request_template.md template.'
-tools: ['search/codebase', 'search', 'github', 'create_pull_request', 'update_pull_request', 'get_pull_request_diff']
----
-# Create GitHub Pull Request from Specification
-
-Create a GitHub Pull Request for the current branch, following the template at `${workspaceFolder}/.github/pull_request_template.md`.
-
-## Process
-
-1. Analyze the pull request template at '${workspaceFolder}/.github/pull_request_template.md' using the 'search' tool to extract its required sections.
-2. Create a draft pull request targeting `${input:targetBranch}` using the 'create_pull_request' tool. First use 'get_pull_request' to confirm that no pull request already exists for the current branch; if one exists, skip step 3 and continue at step 4.
-3. Analyze the changes included in the pull request using the 'get_pull_request_diff' tool.
-4. Update the pull request title and body using the 'update_pull_request' tool, filling in the template sections extracted in step 1.
-5. Switch the pull request from draft to ready for review using the 'update_pull_request' tool.
-6. Use 'get_me' to get the username of the pull request author, then assign them with the 'update_issue' tool.
-7. Respond to the user with the URL of the created pull request.
-
-## Requirements
-- Single pull request for the complete specification
-- Clear title identifying the specification
-- Fill in all relevant sections of pull_request_template.md
-- Verify against existing pull requests before creation
diff --git a/prompts/create-implementation-plan.prompt.md b/prompts/create-implementation-plan.prompt.md
deleted file mode 100644
index ffc0bc0f..00000000
--- a/prompts/create-implementation-plan.prompt.md
+++ /dev/null
@@ -1,157 +0,0 @@
----
-agent: 'agent'
-description: 'Create a new implementation plan file for new features, refactoring existing code or upgrading packages, design, architecture or infrastructure.'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'githubRepo', 'openSimpleBrowser', 'problems', 'runTasks', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI']
----
-# Create Implementation Plan
-
-## Primary Directive
-
-Your goal is to create a new implementation plan file for `${input:PlanPurpose}`. Your output must be machine-readable, deterministic, and structured for autonomous execution by other AI systems or humans.
-
-## Execution Context
-
-This prompt is designed for AI-to-AI communication and automated processing. All instructions must be interpreted literally and executed systematically without human interpretation or clarification.
-
-## Core Requirements
-
-- Generate implementation plans that are fully executable by AI agents or humans
-- Use deterministic language with zero ambiguity
-- Structure all content for automated parsing and execution
-- Ensure complete self-containment with no external dependencies for understanding
-
-## Plan Structure Requirements
-
-Plans must consist of discrete, atomic phases containing executable tasks. Each phase must be independently processable by AI agents or humans without cross-phase dependencies unless explicitly declared.
-
-## Phase Architecture
-
-- Each phase must have measurable completion criteria
-- Tasks within phases must be executable in parallel unless dependencies are specified
-- All task descriptions must include specific file paths, function names, and exact implementation details
-- No task should require human interpretation or decision-making
-
-## AI-Optimized Implementation Standards
-
-- Use explicit, unambiguous language with zero interpretation required
-- Structure all content as machine-parseable formats (tables, lists, structured data)
-- Include specific file paths, line numbers, and exact code references where applicable
-- Define all variables, constants, and configuration values explicitly
-- Provide complete context within each task description
-- Use standardized prefixes for all identifiers (REQ-, TASK-, etc.)
-- Include validation criteria that can be automatically verified
-
-## Output File Specifications
-
-- Save implementation plan files in `/plan/` directory
-- Use naming convention: `[purpose]-[component]-[version].md`
-- Purpose prefixes: `upgrade|refactor|feature|data|infrastructure|process|architecture|design`
-- Example: `upgrade-system-command-4.md`, `feature-auth-module-1.md`
-- File must be valid Markdown with proper front matter structure
-
-## Mandatory Template Structure
-
-All implementation plans must strictly adhere to the following template. Each section is required and must be populated with specific, actionable content. AI agents must validate template compliance before execution.
-
-## Template Validation Rules
-
-- All front matter fields must be present and properly formatted
-- All section headers must match exactly (case-sensitive)
-- All identifier prefixes must follow the specified format
-- Tables must include all required columns
-- No placeholder text may remain in the final output
-
-## Status
-
-The status of the implementation plan must be clearly defined in the front matter and must reflect the current state of the plan. The status can be one of the following (status_color in brackets): `Completed` (bright green badge), `In progress` (yellow badge), `Planned` (blue badge), `Deprecated` (red badge), or `On Hold` (orange badge). It should also be displayed as a badge in the introduction section.
-
-```md
----
-goal: [Concise Title Describing the Package Implementation Plan's Goal]
-version: [Optional: e.g., 1.0, Date]
-date_created: [YYYY-MM-DD]
-last_updated: [Optional: YYYY-MM-DD]
-owner: [Optional: Team/Individual responsible for this spec]
-status: 'Completed'|'In progress'|'Planned'|'Deprecated'|'On Hold'
-tags: [Optional: List of relevant tags or categories, e.g., `feature`, `upgrade`, `chore`, `architecture`, `migration`, `bug` etc]
----
-
-# Introduction
-
-
-
-[A short concise introduction to the plan and the goal it is intended to achieve.]
-
-## 1. Requirements & Constraints
-
-[Explicitly list all requirements & constraints that affect the plan and constrain how it is implemented. Use bullet points or tables for clarity.]
-
-- **REQ-001**: Requirement 1
-- **SEC-001**: Security Requirement 1
-- **[3 LETTERS]-001**: Other Requirement 1
-- **CON-001**: Constraint 1
-- **GUD-001**: Guideline 1
-- **PAT-001**: Pattern to follow 1
-
-## 2. Implementation Steps
-
-### Implementation Phase 1
-
-- GOAL-001: [Describe the goal of this phase, e.g., "Implement feature X", "Refactor module Y", etc.]
-
-| Task | Description | Completed | Date |
-|------|-------------|-----------|------|
-| TASK-001 | Description of task 1 | ✅ | 2025-04-25 |
-| TASK-002 | Description of task 2 | | |
-| TASK-003 | Description of task 3 | | |
-
-### Implementation Phase 2
-
-- GOAL-002: [Describe the goal of this phase, e.g., "Implement feature X", "Refactor module Y", etc.]
-
-| Task | Description | Completed | Date |
-|------|-------------|-----------|------|
-| TASK-004 | Description of task 4 | | |
-| TASK-005 | Description of task 5 | | |
-| TASK-006 | Description of task 6 | | |
-
-## 3. Alternatives
-
-[A bullet point list of any alternative approaches that were considered and why they were not chosen. This helps to provide context and rationale for the chosen approach.]
-
-- **ALT-001**: Alternative approach 1
-- **ALT-002**: Alternative approach 2
-
-## 4. Dependencies
-
-[List any dependencies that need to be addressed, such as libraries, frameworks, or other components that the plan relies on.]
-
-- **DEP-001**: Dependency 1
-- **DEP-002**: Dependency 2
-
-## 5. Files
-
-[List the files that will be affected by the feature or refactoring task.]
-
-- **FILE-001**: Description of file 1
-- **FILE-002**: Description of file 2
-
-## 6. Testing
-
-[List the tests that need to be implemented to verify the feature or refactoring task.]
-
-- **TEST-001**: Description of test 1
-- **TEST-002**: Description of test 2
-
-## 7. Risks & Assumptions
-
-[List any risks or assumptions related to the implementation of the plan.]
-
-- **RISK-001**: Risk 1
-- **ASSUMPTION-001**: Assumption 1
-
-## 8. Related Specifications / Further Reading
-
-[Link to related spec 1]
-[Link to relevant external documentation]
-```
diff --git a/prompts/create-llms.prompt.md b/prompts/create-llms.prompt.md
deleted file mode 100644
index c9e5e58f..00000000
--- a/prompts/create-llms.prompt.md
+++ /dev/null
@@ -1,210 +0,0 @@
----
-agent: 'agent'
-description: 'Create an llms.txt file from scratch based on repository structure following the llms.txt specification at https://llmstxt.org/'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'githubRepo', 'openSimpleBrowser', 'problems', 'runTasks', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI']
----
-# Create LLMs.txt File from Repository Structure
-
-Create a new `llms.txt` file from scratch in the root of the repository following the official llms.txt specification at https://llmstxt.org/. This file provides high-level guidance to large language models (LLMs) on where to find relevant content for understanding the repository's purpose and specifications.
-
-## Primary Directive
-
-Create a comprehensive `llms.txt` file that serves as an entry point for LLMs to understand and navigate the repository effectively. The file must comply with the llms.txt specification and be optimized for LLM consumption while remaining human-readable.
-
-## Analysis and Planning Phase
-
-Before creating the `llms.txt` file, you must complete a thorough analysis:
-
-### Step 1: Review llms.txt Specification
-
-- Review the official specification at https://llmstxt.org/ to ensure full compliance
-- Understand the required format structure and guidelines
-- Note the specific markdown structure requirements
-
-### Step 2: Repository Structure Analysis
-
-- Examine the complete repository structure using appropriate tools
-- Identify the primary purpose and scope of the repository
-- Catalog all important directories and their purposes
-- List key files that would be valuable for LLM understanding
-
-### Step 3: Content Discovery
-
-- Identify README files and their locations
-- Find documentation files (`.md` files in `/docs/`, `/spec/`, etc.)
-- Locate specification files and their purposes
-- Discover configuration files and their relevance
-- Find example files and code samples
-- Identify any existing documentation structure
-
-### Step 4: Create Implementation Plan
-
-Based on your analysis, create a structured plan that includes:
-
-- Repository purpose and scope summary
-- Priority-ordered list of essential files for LLM understanding
-- Secondary files that provide additional context
-- Organizational structure for the llms.txt file
-
-## Implementation Requirements
-
-### Format Compliance
-
-The `llms.txt` file must follow this exact structure per the specification:
-
-1. **H1 Header**: Single line with repository/project name (required)
-2. **Blockquote Summary**: Brief description in blockquote format (optional but recommended)
-3. **Additional Details**: Zero or more markdown sections without headings for context
-4. **File List Sections**: Zero or more H2 sections containing markdown lists of links
-
-### Content Requirements
-
-#### Required Elements
-
-- **Project Name**: Clear, descriptive title as H1
-- **Summary**: Concise blockquote explaining the repository's purpose
-- **Key Files**: Essential files organized by category (H2 sections)
-
-#### File Link Format
-
-Each file link must follow: `[descriptive-name](relative-url): optional description`
-
-#### Section Organization
-
-Organize files into logical H2 sections such as:
-
-- **Documentation**: Core documentation files
-- **Specifications**: Technical specifications and requirements
-- **Examples**: Sample code and usage examples
-- **Configuration**: Setup and configuration files
-- **Optional**: Secondary files (special meaning - can be skipped for shorter context)
-
-### Content Guidelines
-
-#### Language and Style
-
-- Use concise, clear, unambiguous language
-- Avoid jargon without explanation
-- Write for both human and LLM readers
-- Be specific and informative in descriptions
-
-#### File Selection Criteria
-
-Include files that:
-- Explain the repository's purpose and scope
-- Provide essential technical documentation
-- Show usage examples and patterns
-- Define interfaces and specifications
-- Contain configuration and setup instructions
-
-Exclude files that:
-- Are purely implementation details
-- Contain redundant information
-- Are build artifacts or generated content
-- Are not relevant to understanding the project
-
-## Execution Steps
-
-### Step 1: Repository Analysis
-
-1. Examine the repository structure completely
-2. Read the main README.md to understand the project
-3. Identify all documentation directories and files
-4. Catalog specification files and their purposes
-5. Find example files and configuration files
-
-### Step 2: Content Planning
-
-1. Determine the primary purpose statement
-2. Write a concise summary for the blockquote
-3. Group identified files into logical categories
-4. Prioritize files by importance for LLM understanding
-5. Create descriptions for each file link
-
-### Step 3: File Creation
-
-1. Create the `llms.txt` file in the repository root
-2. Follow the exact format specification
-3. Include all required sections
-4. Use proper markdown formatting
-5. Ensure all links are valid relative paths
-
-### Step 4: Validation
-
-1. Verify compliance with https://llmstxt.org/ specification
-2. Check that all links are valid and accessible
-3. Ensure the file serves as an effective LLM navigation tool
-4. Confirm the file is both human and machine readable
-
-## Quality Assurance
-
-### Format Validation
-
-- ✅ H1 header with project name
-- ✅ Blockquote summary (if included)
-- ✅ H2 sections for file lists
-- ✅ Proper markdown link format
-- ✅ No broken or invalid links
-- ✅ Consistent formatting throughout
-
-### Content Validation
-
-- ✅ Clear, unambiguous language
-- ✅ Comprehensive coverage of essential files
-- ✅ Logical organization of content
-- ✅ Appropriate file descriptions
-- ✅ Serves as effective LLM navigation tool
-
-### Specification Compliance
-
-- ✅ Follows https://llmstxt.org/ format exactly
-- ✅ Uses required markdown structure
-- ✅ Implements optional sections appropriately
-- ✅ File located at repository root (`/llms.txt`)
-
-## Example Structure Template
-
-```txt
-# [Repository Name]
-
-> [Concise description of the repository's purpose and scope]
-
-[Optional additional context paragraphs without headings]
-
-## Documentation
-
-- [Main README](README.md): Primary project documentation and getting started guide
-- [Contributing Guide](CONTRIBUTING.md): Guidelines for contributing to the project
-- [Code of Conduct](CODE_OF_CONDUCT.md): Community guidelines and expectations
-
-## Specifications
-
-- [Technical Specification](spec/technical-spec.md): Detailed technical requirements and constraints
-- [API Specification](spec/api-spec.md): Interface definitions and data contracts
-
-## Examples
-
-- [Basic Example](examples/basic-usage.md): Simple usage demonstration
-- [Advanced Example](examples/advanced-usage.md): Complex implementation patterns
-
-## Configuration
-
-- [Setup Guide](docs/setup.md): Installation and configuration instructions
-- [Deployment Guide](docs/deployment.md): Production deployment guidelines
-
-## Optional
-
-- [Architecture Documentation](docs/architecture.md): Detailed system architecture
-- [Design Decisions](docs/decisions.md): Historical design decision records
-```
-
-## Success Criteria
-
-The created `llms.txt` file should:
-1. Enable LLMs to quickly understand the repository's purpose
-2. Provide clear navigation to essential documentation
-3. Follow the official llms.txt specification exactly
-4. Be comprehensive yet concise
-5. Serve both human and machine readers effectively
-6. Include all critical files for project understanding
-7. Use clear, unambiguous language throughout
-8. Organize content logically for easy consumption
diff --git a/prompts/create-oo-component-documentation.prompt.md b/prompts/create-oo-component-documentation.prompt.md
deleted file mode 100644
index 33bb0cf9..00000000
--- a/prompts/create-oo-component-documentation.prompt.md
+++ /dev/null
@@ -1,193 +0,0 @@
----
-agent: 'agent'
-description: 'Create comprehensive, standardized documentation for object-oriented components following industry best practices and architectural documentation standards.'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'githubRepo', 'openSimpleBrowser', 'problems', 'runTasks', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI']
----
-# Generate Standard OO Component Documentation
-
-Create comprehensive documentation for the object-oriented component(s) at: `${input:ComponentPath}`.
-
-Analyze the component by examining the code at the provided path. If the path is a folder, analyze all source files within it; if it is a single file, treat it as the main component and analyze related files in the same directory.
-
-## Documentation Standards
-
-- DOC-001: Follow C4 Model documentation levels (Context, Containers, Components, Code)
-- DOC-002: Align with Arc42 software architecture documentation template
-- DOC-003: Comply with IEEE 1016 Software Design Description standard
-- DOC-004: Use Agile Documentation principles (just enough documentation that adds value)
-- DOC-005: Target developers and maintainers as primary audience
-
-## Analysis Instructions
-
-- ANA-001: Determine path type (folder vs single file) and identify primary component
-- ANA-002: Examine source code files for class structures and inheritance
-- ANA-003: Identify design patterns and architectural decisions
-- ANA-004: Document public APIs, interfaces, and dependencies
-- ANA-005: Recognize creational/structural/behavioral patterns
-- ANA-006: Document method parameters, return values, exceptions
-- ANA-007: Assess performance, security, reliability, maintainability
-- ANA-008: Infer integration patterns and data flow
-
-## Language-Specific Optimizations
-
-- LNG-001: **C#/.NET** - async/await, dependency injection, configuration, disposal
-- LNG-002: **Java** - Spring framework, annotations, exception handling, packaging
-- LNG-003: **TypeScript/JavaScript** - modules, async patterns, types, npm
-- LNG-004: **Python** - packages, virtual environments, type hints, testing
-
-## Error Handling
-
-- ERR-001: Path doesn't exist - provide correct format guidance
-- ERR-002: No source files found - suggest alternative locations
-- ERR-003: Unclear structure - document findings and request clarification
-- ERR-004: Non-standard patterns - document custom approaches
-- ERR-005: Insufficient code - focus on available information, highlight gaps
-
-## Output Format
-
-Generate well-structured Markdown with clear heading hierarchy, code blocks, tables, bullet points, and proper formatting for readability and maintainability.
-
-## File Location
-
-The documentation should be saved in the `/docs/components/` directory and named according to the convention: `[component-name]-documentation.md`.
-
-## Required Documentation Structure
-
-The documentation file must follow the template below, ensuring that all sections are filled out appropriately. The front matter for the markdown should be structured correctly as per the following example:
-
-```md
----
-title: [Component Name] - Technical Documentation
-component_path: `${input:ComponentPath}`
-version: [Optional: e.g., 1.0, Date]
-date_created: [YYYY-MM-DD]
-last_updated: [Optional: YYYY-MM-DD]
-owner: [Optional: Team/Individual responsible for this component]
-tags: [Optional: List of relevant tags or categories, e.g., `component`,`service`,`tool`,`infrastructure`,`documentation`,`architecture` etc]
----
-
-# [Component Name] Documentation
-
-[A short concise introduction to the component and its purpose within the system.]
-
-## 1. Component Overview
-
-### Purpose/Responsibility
-- OVR-001: State component's primary responsibility
-- OVR-002: Define scope (included/excluded functionality)
-- OVR-003: Describe system context and relationships
-
-## 2. Architecture Section
-
-- ARC-001: Document design patterns used (Repository, Factory, Observer, etc.)
-- ARC-002: List internal and external dependencies with purposes
-- ARC-003: Document component interactions and relationships
-- ARC-004: Include visual diagrams (UML class, sequence, component)
-- ARC-005: Create mermaid diagram showing component structure, relationships, and dependencies
-
-### Component Structure and Dependencies Diagram
-
-Include a comprehensive mermaid diagram that shows:
-- **Component structure** - Main classes, interfaces, and their relationships
-- **Internal dependencies** - How components interact within the system
-- **External dependencies** - External libraries, services, databases, APIs
-- **Data flow** - Direction of dependencies and interactions
-- **Inheritance/composition** - Class hierarchies and composition relationships
-
-```mermaid
-graph TD
- subgraph "Component System"
- A[Main Component] --> B[Internal Service]
- A --> C[Internal Repository]
- B --> D[Business Logic]
- C --> E[Data Access Layer]
- end
-
- subgraph "External Dependencies"
- F[External API]
- G[Database]
- H[Third-party Library]
- I[Configuration Service]
- end
-
- A --> F
- E --> G
- B --> H
- A --> I
-
- classDiagram
- class MainComponent {
- +property: Type
- +method(): ReturnType
- +asyncMethod(): Promise~Type~
- }
- class InternalService {
- +businessOperation(): Result
- }
- class ExternalAPI {
- <<interface>>
- +apiCall(): Data
- }
-
- MainComponent --> InternalService
- MainComponent --> ExternalAPI
-```
-
-## 3. Interface Documentation
-
-- INT-001: Document all public interfaces and usage patterns
-- INT-002: Create method/property reference table
-- INT-003: Document events/callbacks/notification mechanisms
-
-| Method/Property | Purpose | Parameters | Return Type | Usage Notes |
-|-----------------|---------|------------|-------------|-------------|
-| [Name] | [Purpose] | [Parameters] | [Type] | [Notes] |
-
-## 4. Implementation Details
-
-- IMP-001: Document main implementation classes and responsibilities
-- IMP-002: Describe configuration requirements and initialization
-- IMP-003: Document key algorithms and business logic
-- IMP-004: Note performance characteristics and bottlenecks
-
-## 5. Usage Examples
-
-### Basic Usage
-
-```csharp
-// Basic usage example
-var component = new ComponentName();
-component.DoSomething();
-```
-
-### Advanced Usage
-
-```csharp
-// Advanced configuration patterns
-var options = new ComponentOptions();
-var component = ComponentFactory.Create(options);
-await component.ProcessAsync(data);
-```
-
-- USE-001: Provide basic usage examples
-- USE-002: Show advanced configuration patterns
-- USE-003: Document best practices and recommended patterns
-
-## 6. Quality Attributes
-
-- QUA-001: Security (authentication, authorization, data protection)
-- QUA-002: Performance (characteristics, scalability, resource usage)
-- QUA-003: Reliability (error handling, fault tolerance, recovery)
-- QUA-004: Maintainability (standards, testing, documentation)
-- QUA-005: Extensibility (extension points, customization options)
-
-## 7. Reference Information
-
-- REF-001: List dependencies with versions and purposes
-- REF-002: Complete configuration options reference
-- REF-003: Testing guidelines and mock setup
-- REF-004: Troubleshooting (common issues, error messages)
-- REF-005: Related documentation links
-- REF-006: Change history and migration notes
-
-```
diff --git a/prompts/create-readme.prompt.md b/prompts/create-readme.prompt.md
deleted file mode 100644
index 1a92ca1a..00000000
--- a/prompts/create-readme.prompt.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-agent: 'agent'
-description: 'Create a README.md file for the project'
----
-
-## Role
-
-You're a senior expert software engineer with extensive experience in open source projects. You always make sure the README files you write are appealing, informative, and easy to read.
-
-## Task
-
-1. Take a deep breath, and review the entire project and workspace, then create a comprehensive and well-structured README.md file for the project.
-2. Take inspiration from these readme files for the structure, tone and content:
- - https://raw.githubusercontent.com/Azure-Samples/serverless-chat-langchainjs/refs/heads/main/README.md
- - https://raw.githubusercontent.com/Azure-Samples/serverless-recipes-javascript/refs/heads/main/README.md
- - https://raw.githubusercontent.com/sinedied/run-on-output/refs/heads/main/README.md
- - https://raw.githubusercontent.com/sinedied/smoke/refs/heads/main/README.md
-3. Do not overuse emojis, and keep the readme concise and to the point.
-4. Do not include sections like "LICENSE", "CONTRIBUTING", "CHANGELOG", etc. There are dedicated files for those sections.
-5. Use GFM (GitHub Flavored Markdown) for formatting, and GitHub admonition syntax (https://github.com/orgs/community/discussions/16925) where appropriate.
-6. If you find a logo or icon for the project, use it in the readme's header.
diff --git a/prompts/create-specification.prompt.md b/prompts/create-specification.prompt.md
deleted file mode 100644
index 08093e04..00000000
--- a/prompts/create-specification.prompt.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-agent: 'agent'
-description: 'Create a new specification file for the solution, optimized for Generative AI consumption.'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'extensions', 'web/fetch', 'githubRepo', 'openSimpleBrowser', 'problems', 'runTasks', 'search', 'search/searchResults', 'runCommands/terminalLastCommand', 'runCommands/terminalSelection', 'testFailure', 'usages', 'vscodeAPI']
----
-# Create Specification
-
-Your goal is to create a new specification file for `${input:SpecPurpose}`.
-
-The specification file must define the requirements, constraints, and interfaces for the solution components in a manner that is clear, unambiguous, and structured for effective use by Generative AIs. Follow established documentation standards and ensure the content is machine-readable and self-contained.
-
-## Best Practices for AI-Ready Specifications
-
-- Use precise, explicit, and unambiguous language.
-- Clearly distinguish between requirements, constraints, and recommendations.
-- Use structured formatting (headings, lists, tables) for easy parsing.
-- Avoid idioms, metaphors, or context-dependent references.
-- Define all acronyms and domain-specific terms.
-- Include examples and edge cases where applicable.
-- Ensure the document is self-contained and does not rely on external context.
-
-The specification should be saved in the [/spec/](/spec/) directory and named according to the following convention: `spec-[a-z0-9-]+.md`, where the name should be descriptive of the specification's content, starting with the high-level purpose, which is one of [schema, tool, data, infrastructure, process, architecture, or design].
-
-The specification file must be formatted as well-formed Markdown.
-
-Specification files must follow the template below, ensuring that all sections are filled out appropriately. The front matter for the markdown should be structured correctly as per the following example:
-
-```md
----
-title: [Concise Title Describing the Specification's Focus]
-version: [Optional: e.g., 1.0, Date]
-date_created: [YYYY-MM-DD]
-last_updated: [Optional: YYYY-MM-DD]
-owner: [Optional: Team/Individual responsible for this spec]
-tags: [Optional: List of relevant tags or categories, e.g., `infrastructure`, `process`, `design`, `app` etc]
----
-
-# Introduction
-
-[A short concise introduction to the specification and the goal it is intended to achieve.]
-
-## 1. Purpose & Scope
-
-[Provide a clear, concise description of the specification's purpose and the scope of its application. State the intended audience and any assumptions.]
-
-## 2. Definitions
-
-[List and define all acronyms, abbreviations, and domain-specific terms used in this specification.]
-
-## 3. Requirements, Constraints & Guidelines
-
-[Explicitly list all requirements, constraints, rules, and guidelines. Use bullet points or tables for clarity.]
-
-- **REQ-001**: Requirement 1
-- **SEC-001**: Security Requirement 1
-- **[3 LETTERS]-001**: Other Requirement 1
-- **CON-001**: Constraint 1
-- **GUD-001**: Guideline 1
-- **PAT-001**: Pattern to follow 1
-
-## 4. Interfaces & Data Contracts
-
-[Describe the interfaces, APIs, data contracts, or integration points. Use tables or code blocks for schemas and examples.]
-
-## 5. Acceptance Criteria
-
-[Define clear, testable acceptance criteria for each requirement using Given-When-Then format where appropriate.]
-
-- **AC-001**: Given [context], When [action], Then [expected outcome]
-- **AC-002**: The system shall [specific behavior] when [condition]
-- **AC-003**: [Additional acceptance criteria as needed]
-
-## 6. Test Automation Strategy
-
-[Define the testing approach, frameworks, and automation requirements.]
-
-- **Test Levels**: Unit, Integration, End-to-End
-- **Frameworks**: MSTest, FluentAssertions, Moq (for .NET applications)
-- **Test Data Management**: [approach for test data creation and cleanup]
-- **CI/CD Integration**: [automated testing in GitHub Actions pipelines]
-- **Coverage Requirements**: [minimum code coverage thresholds]
-- **Performance Testing**: [approach for load and performance testing]
-
-## 7. Rationale & Context
-
-[Explain the reasoning behind the requirements, constraints, and guidelines. Provide context for design decisions.]
-
-## 8. Dependencies & External Integrations
-
-[Define the external systems, services, and architectural dependencies required for this specification. Focus on **what** is needed rather than **how** it's implemented. Avoid specific package or library versions unless they represent architectural constraints.]
-
-### External Systems
-- **EXT-001**: [External system name] - [Purpose and integration type]
-
-### Third-Party Services
-- **SVC-001**: [Service name] - [Required capabilities and SLA requirements]
-
-### Infrastructure Dependencies
-- **INF-001**: [Infrastructure component] - [Requirements and constraints]
-
-### Data Dependencies
-- **DAT-001**: [External data source] - [Format, frequency, and access requirements]
-
-### Technology Platform Dependencies
-- **PLT-001**: [Platform/runtime requirement] - [Version constraints and rationale]
-
-### Compliance Dependencies
-- **COM-001**: [Regulatory or compliance requirement] - [Impact on implementation]
-
-**Note**: This section should focus on architectural and business dependencies, not specific package implementations. For example, specify "OAuth 2.0 authentication library" rather than "Microsoft.AspNetCore.Authentication.JwtBearer v6.0.1".
-
-## 9. Examples & Edge Cases
-
- ```code
- // Code snippet or data example demonstrating the correct application of the guidelines, including edge cases
- ```
-
-## 10. Validation Criteria
-
-[List the criteria or tests that must be satisfied for compliance with this specification.]
-
-## 11. Related Specifications / Further Reading
-
-[Link to related spec 1]
-[Link to relevant external documentation]
-
-```
diff --git a/prompts/create-spring-boot-java-project.prompt.md b/prompts/create-spring-boot-java-project.prompt.md
deleted file mode 100644
index 4d227e89..00000000
--- a/prompts/create-spring-boot-java-project.prompt.md
+++ /dev/null
@@ -1,163 +0,0 @@
----
-agent: 'agent'
-description: 'Create Spring Boot Java Project Skeleton'
----
-
-# Create Spring Boot Java project prompt
-
-- Please make sure you have the following software installed on your system:
-
- - Java 21
- - Docker
- - Docker Compose
-
-- If you need to customize the project name, please change the `artifactId` and the `packageName` in [download-spring-boot-project-template](./create-spring-boot-java-project.prompt.md#download-spring-boot-project-template)
-
-- If you need to update the Spring Boot version, please change the `bootVersion` in [download-spring-boot-project-template](./create-spring-boot-java-project.prompt.md#download-spring-boot-project-template)
-
-## Check Java version
-
-- Run the following command in the terminal to check the Java version
-
-```shell
-java -version
-```
-
-## Download Spring Boot project template
-
-- Run the following command in the terminal to download a Spring Boot project template
-
-```shell
-curl https://start.spring.io/starter.zip \
- -d artifactId=${input:projectName:demo-java} \
- -d bootVersion=3.4.5 \
- -d dependencies=lombok,configuration-processor,web,data-jpa,postgresql,data-redis,data-mongodb,validation,cache,testcontainers \
- -d javaVersion=21 \
- -d packageName=com.example \
- -d packaging=jar \
- -d type=maven-project \
- -o starter.zip
-```
-
-## Unzip the downloaded file
-
-- Run the following command in the terminal to unzip the downloaded file
-
-```shell
-unzip starter.zip -d ./${input:projectName:demo-java}
-```
-
-## Remove the downloaded zip file
-
-- Run the following command in the terminal to delete the downloaded zip file
-
-```shell
-rm -f starter.zip
-```
-
-## Change directory to the project root
-
-- Run the following command in the terminal to change directory to the project root
-
-```shell
-cd ${input:projectName:demo-java}
-```
-
-## Add additional dependencies
-
-- Insert the `springdoc-openapi-starter-webmvc-ui` and `archunit-junit5` dependencies into the `pom.xml` file
-
-```xml
-<dependency>
-    <groupId>org.springdoc</groupId>
-    <artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>
-    <version>2.8.6</version>
-</dependency>
-<dependency>
-    <groupId>com.tngtech.archunit</groupId>
-    <artifactId>archunit-junit5</artifactId>
-    <version>1.2.1</version>
-    <scope>test</scope>
-</dependency>
-```
-
-## Add SpringDoc, Redis, JPA and MongoDB configurations
-
-- Insert SpringDoc configurations into `application.properties` file
-
-```properties
-# SpringDoc configurations
-springdoc.swagger-ui.doc-expansion=none
-springdoc.swagger-ui.operations-sorter=alpha
-springdoc.swagger-ui.tags-sorter=alpha
-```
-
-- Insert Redis configurations into `application.properties` file
-
-```properties
-# Redis configurations
-spring.data.redis.host=localhost
-spring.data.redis.port=6379
-spring.data.redis.password=rootroot
-```
-
-- Insert JPA configurations into `application.properties` file
-
-```properties
-# JPA configurations
-spring.datasource.driver-class-name=org.postgresql.Driver
-spring.datasource.url=jdbc:postgresql://localhost:5432/postgres
-spring.datasource.username=postgres
-spring.datasource.password=rootroot
-spring.jpa.hibernate.ddl-auto=update
-spring.jpa.show-sql=true
-spring.jpa.properties.hibernate.format_sql=true
-```
-
-- Insert MongoDB configurations into `application.properties` file
-
-```properties
-# MongoDB configurations
-spring.data.mongodb.host=localhost
-spring.data.mongodb.port=27017
-spring.data.mongodb.authentication-database=admin
-spring.data.mongodb.username=root
-spring.data.mongodb.password=rootroot
-spring.data.mongodb.database=test
-```
-
-## Add `docker-compose.yaml` with Redis, PostgreSQL and MongoDB services
-
-- Create `docker-compose.yaml` at project root and add following services: `redis:6`, `postgresql:17` and `mongo:8`.
-
- - redis service should have
- - password `rootroot`
- - mapping port 6379 to 6379
- - mounting volume `./redis_data` to `/data`
- - postgresql service should have
- - password `rootroot`
- - mapping port 5432 to 5432
- - mounting volume `./postgres_data` to `/var/lib/postgresql/data`
- - mongo service should have
- - initdb root username `root`
- - initdb root password `rootroot`
- - mapping port 27017 to 27017
- - mounting volume `./mongo_data` to `/data/db`
-
-## Add `.gitignore` file
-
-- Insert `redis_data`, `postgres_data` and `mongo_data` directories in `.gitignore` file
-
-## Run Maven test command
-
-- Run the Maven clean test command to check that the project works
-
-```shell
-./mvnw clean test
-```
-
-## Run Maven run command (Optional)
-
-- (Optional) `docker-compose up -d` to start the services, `./mvnw spring-boot:run` to run the Spring Boot project, `docker-compose rm -sf` to stop the services.
-
-## Let's do this step by step
diff --git a/prompts/create-spring-boot-kotlin-project.prompt.md b/prompts/create-spring-boot-kotlin-project.prompt.md
deleted file mode 100644
index 3554cd57..00000000
--- a/prompts/create-spring-boot-kotlin-project.prompt.md
+++ /dev/null
@@ -1,147 +0,0 @@
----
-agent: 'agent'
-description: 'Create Spring Boot Kotlin Project Skeleton'
----
-
-# Create Spring Boot Kotlin project prompt
-
-- Please make sure you have the following software installed on your system:
-
- - Java 21
- - Docker
- - Docker Compose
-
-- If you need to customize the project name, please change the `artifactId` and the `packageName` in [download-spring-boot-project-template](./create-spring-boot-kotlin-project.prompt.md#download-spring-boot-project-template)
-
-- If you need to update the Spring Boot version, please change the `bootVersion` in [download-spring-boot-project-template](./create-spring-boot-kotlin-project.prompt.md#download-spring-boot-project-template)
-
-## Check Java version
-
-- Run the following command in the terminal to check the Java version
-
-```shell
-java -version
-```
-
-## Download Spring Boot project template
-
-- Run the following command in the terminal to download a Spring Boot project template
-
-```shell
-curl https://start.spring.io/starter.zip \
- -d artifactId=${input:projectName:demo-kotlin} \
- -d bootVersion=3.4.5 \
- -d dependencies=configuration-processor,webflux,data-r2dbc,postgresql,data-redis-reactive,data-mongodb-reactive,validation,cache,testcontainers \
- -d javaVersion=21 \
- -d language=kotlin \
- -d packageName=com.example \
- -d packaging=jar \
- -d type=gradle-project-kotlin \
- -o starter.zip
-```
-
-## Unzip the downloaded file
-
-- Run the following command in the terminal to unzip the downloaded file
-
-```shell
-unzip starter.zip -d ./${input:projectName:demo-kotlin}
-```
-
-## Remove the downloaded zip file
-
-- Run the following command in the terminal to delete the downloaded zip file
-
-```shell
-rm -f starter.zip
-```
-
-## Add additional dependencies
-
-- Insert the `springdoc-openapi-starter-webflux-ui` and `archunit-junit5` dependencies into the `build.gradle.kts` file
-
-```gradle.kts
-dependencies {
- implementation("org.springdoc:springdoc-openapi-starter-webflux-ui:2.8.6")
- testImplementation("com.tngtech.archunit:archunit-junit5:1.2.1")
-}
-```
-
-- Insert SpringDoc configurations into `application.properties` file
-
-```properties
-# SpringDoc configurations
-springdoc.swagger-ui.doc-expansion=none
-springdoc.swagger-ui.operations-sorter=alpha
-springdoc.swagger-ui.tags-sorter=alpha
-```
-
-- Insert Redis configurations into `application.properties` file
-
-```properties
-# Redis configurations
-spring.data.redis.host=localhost
-spring.data.redis.port=6379
-spring.data.redis.password=rootroot
-```
-
-- Insert R2DBC configurations into `application.properties` file
-
-```properties
-# R2DBC configurations
-spring.r2dbc.url=r2dbc:postgresql://localhost:5432/postgres
-spring.r2dbc.username=postgres
-spring.r2dbc.password=rootroot
-
-spring.sql.init.mode=always
-spring.sql.init.platform=postgres
-spring.sql.init.continue-on-error=true
-```
-
-- Insert MongoDB configurations into `application.properties` file
-
-```properties
-# MongoDB configurations
-spring.data.mongodb.host=localhost
-spring.data.mongodb.port=27017
-spring.data.mongodb.authentication-database=admin
-spring.data.mongodb.username=root
-spring.data.mongodb.password=rootroot
-spring.data.mongodb.database=test
-```
-
-- Create `docker-compose.yaml` at project root and add following services: `redis:6`, `postgresql:17` and `mongo:8`.
-
- - redis service should have
- - password `rootroot`
- - mapping port 6379 to 6379
- - mounting volume `./redis_data` to `/data`
- - postgresql service should have
- - password `rootroot`
- - mapping port 5432 to 5432
- - mounting volume `./postgres_data` to `/var/lib/postgresql/data`
- - mongo service should have
- - initdb root username `root`
- - initdb root password `rootroot`
- - mapping port 27017 to 27017
- - mounting volume `./mongo_data` to `/data/db`
-
-- Insert `redis_data`, `postgres_data` and `mongo_data` directories in `.gitignore` file
-
-- Run the Gradle clean test command to check that the project works
-
-```shell
-./gradlew clean test
-```
-
-- (Optional) `docker-compose up -d` to start the services, `./gradlew bootRun` to run the Spring Boot project, `docker-compose rm -sf` to stop the services.
-
-Let's do this step by step.
diff --git a/prompts/create-technical-spike.prompt.md b/prompts/create-technical-spike.prompt.md
deleted file mode 100644
index 678b89e3..00000000
--- a/prompts/create-technical-spike.prompt.md
+++ /dev/null
@@ -1,231 +0,0 @@
----
-agent: 'agent'
-description: 'Create time-boxed technical spike documents for researching and resolving critical development decisions before implementation.'
-tools: ['runCommands', 'runTasks', 'edit', 'search', 'extensions', 'usages', 'vscodeAPI', 'think', 'problems', 'changes', 'testFailure', 'openSimpleBrowser', 'web/fetch', 'githubRepo', 'todos', 'Microsoft Docs']
----
-
-# Create Technical Spike Document
-
-Create time-boxed technical spike documents for researching critical questions that must be answered before development can proceed. Each spike focuses on a specific technical decision with clear deliverables and timelines.
-
-## Document Structure
-
-Create individual files in `${input:FolderPath|docs/spikes}` directory. Name each file using the pattern: `[category]-[short-description]-spike.md` (e.g., `api-copilot-integration-spike.md`, `performance-realtime-audio-spike.md`).
-
-```md
----
-title: "${input:SpikeTitle}"
-category: "${input:Category|Technical}"
-status: "🔴 Not Started"
-priority: "${input:Priority|High}"
-timebox: "${input:Timebox|1 week}"
-created: [YYYY-MM-DD]
-updated: [YYYY-MM-DD]
-owner: "${input:Owner}"
-tags: ["technical-spike", "${input:Category|technical}", "research"]
----
-
-# ${input:SpikeTitle}
-
-## Summary
-
-**Spike Objective:** [Clear, specific question or decision that needs resolution]
-
-**Why This Matters:** [Impact on development/architecture decisions]
-
-**Timebox:** [How much time allocated to this spike]
-
-**Decision Deadline:** [When this must be resolved to avoid blocking development]
-
-## Research Question(s)
-
-**Primary Question:** [Main technical question that needs answering]
-
-**Secondary Questions:**
-
-- [Related question 1]
-- [Related question 2]
-- [Related question 3]
-
-## Investigation Plan
-
-### Research Tasks
-
-- [ ] [Specific research task 1]
-- [ ] [Specific research task 2]
-- [ ] [Specific research task 3]
-- [ ] [Create proof of concept/prototype]
-- [ ] [Document findings and recommendations]
-
-### Success Criteria
-
-**This spike is complete when:**
-
-- [ ] [Specific criteria 1]
-- [ ] [Specific criteria 2]
-- [ ] [Clear recommendation documented]
-- [ ] [Proof of concept completed (if applicable)]
-
-## Technical Context
-
-**Related Components:** [List system components affected by this decision]
-
-**Dependencies:** [What other spikes or decisions depend on resolving this]
-
-**Constraints:** [Known limitations or requirements that affect the solution]
-
-## Research Findings
-
-### Investigation Results
-
-[Document research findings, test results, and evidence gathered]
-
-### Prototype/Testing Notes
-
-[Results from any prototypes, spikes, or technical experiments]
-
-### External Resources
-
-- [Link to relevant documentation]
-- [Link to API references]
-- [Link to community discussions]
-- [Link to examples/tutorials]
-
-## Decision
-
-### Recommendation
-
-[Clear recommendation based on research findings]
-
-### Rationale
-
-[Why this approach was chosen over alternatives]
-
-### Implementation Notes
-
-[Key considerations for implementation]
-
-### Follow-up Actions
-
-- [ ] [Action item 1]
-- [ ] [Action item 2]
-- [ ] [Update architecture documents]
-- [ ] [Create implementation tasks]
-
-## Status History
-
-| Date | Status | Notes |
-| ------ | -------------- | -------------------------- |
-| [Date] | 🔴 Not Started | Spike created and scoped   |
-| [Date] | 🟡 In Progress | Research commenced         |
-| [Date] | 🟢 Complete    | [Resolution summary]       |
-
----
-
-_Last updated: [Date] by [Name]_
-```
-
-## Categories for Technical Spikes
-
-### API Integration
-
-- Third-party API capabilities and limitations
-- Integration patterns and authentication
-- Rate limits and performance characteristics
-
-### Architecture & Design
-
-- System architecture decisions
-- Design pattern applicability
-- Component interaction models
-
-### Performance & Scalability
-
-- Performance requirements and constraints
-- Scalability bottlenecks and solutions
-- Resource utilization patterns
-
-### Platform & Infrastructure
-
-- Platform capabilities and limitations
-- Infrastructure requirements
-- Deployment and hosting considerations
-
-### Security & Compliance
-
-- Security requirements and implementations
-- Compliance constraints
-- Authentication and authorization approaches
-
-### User Experience
-
-- User interaction patterns
-- Accessibility requirements
-- Interface design decisions
-
-## File Naming Conventions
-
-Use descriptive, kebab-case names that indicate the category and specific unknown:
-
-**API/Integration Examples:**
-
-- `api-copilot-chat-integration-spike.md`
-- `api-azure-speech-realtime-spike.md`
-- `api-vscode-extension-capabilities-spike.md`
-
-**Performance Examples:**
-
-- `performance-audio-processing-latency-spike.md`
-- `performance-extension-host-limitations-spike.md`
-- `performance-webrtc-reliability-spike.md`
-
-**Architecture Examples:**
-
-- `architecture-voice-pipeline-design-spike.md`
-- `architecture-state-management-spike.md`
-- `architecture-error-handling-strategy-spike.md`
-
-## Best Practices for AI Agents
-
-1. **One Question Per Spike:** Each document focuses on a single technical decision or research question
-
-2. **Time-Boxed Research:** Define specific time limits and deliverables for each spike
-
-3. **Evidence-Based Decisions:** Require concrete evidence (tests, prototypes, documentation) before marking as complete
-
-4. **Clear Recommendations:** Document specific recommendations and rationale for implementation
-
-5. **Dependency Tracking:** Identify how spikes relate to each other and impact project decisions
-
-6. **Outcome-Focused:** Every spike must result in an actionable decision or recommendation
-
-## Research Strategy
-
-### Phase 1: Information Gathering
-
-1. **Search existing documentation** using search/fetch tools
-2. **Analyze codebase** for existing patterns and constraints
-3. **Research external resources** (APIs, libraries, examples)
-
-### Phase 2: Validation & Testing
-
-1. **Create focused prototypes** to test specific hypotheses
-2. **Run targeted experiments** to validate assumptions
-3. **Document test results** with supporting evidence
-
-### Phase 3: Decision & Documentation
-
-1. **Synthesize findings** into clear recommendations
-2. **Document implementation guidance** for development team
-3. **Create follow-up tasks** for implementation
-
-## Tools Usage
-
-- **search/searchResults:** Research existing solutions and documentation
-- **fetch/githubRepo:** Analyze external APIs, libraries, and examples
-- **codebase:** Understand existing system constraints and patterns
-- **runTasks:** Execute prototypes and validation tests
-- **editFiles:** Update research progress and findings
-- **vscodeAPI:** Test VS Code extension capabilities and limitations
-
-Focus on time-boxed research that resolves critical technical decisions and unblocks development progress.
diff --git a/prompts/create-tldr-page.prompt.md b/prompts/create-tldr-page.prompt.md
deleted file mode 100644
index fa5f6751..00000000
--- a/prompts/create-tldr-page.prompt.md
+++ /dev/null
@@ -1,211 +0,0 @@
----
-agent: 'agent'
-description: 'Create a tldr page from documentation URLs and command examples, requiring both URL and command name.'
-tools: ['edit/createFile', 'web/fetch']
----
-
-# Create TLDR Page
-
-## Overview
-
-You are an expert technical documentation specialist who creates concise, actionable `tldr` pages
-following the tldr-pages project standards. Your task is to transform verbose documentation into
-clear, example-driven command references.
-
-## Objectives
-
-1. **Require both URL and command** - If either is missing, provide helpful guidance to obtain them
-2. **Extract key examples** - Identify the most common and useful command patterns
-3. **Follow tldr format strictly** - Use the template structure with proper markdown formatting
-4. **Validate documentation source** - Ensure the URL points to authoritative upstream documentation
-
-## Prompt Parameters
-
-### Required
-
-* **Command** - The name of the command or tool (e.g., `git`, `nmcli`, `distrobox-create`)
-* **URL** - Link to authoritative upstream documentation
- - If one or more URLs are passed without a preceding `#fetch`, apply #tool:fetch to the first URL
-  - If ${file} is provided in lieu of a URL, and ${file} contains a URL relevant to **command**, then use
-    the data from the file as if it had been fetched from that URL; use the URL extracted from the file
-    when creating the `tldr` page
- - If more than one URL is in the file, prompt for which URL should be used for the `tldr` page
-
-### Optional
-
-* **Context files** - Additional documentation or examples
-* **Search data** - Results from documentation searches
-* **Text data** - Raw text from manual pages or help output
-* **Help output** - Raw data matching `-h`, `--help`, `/?`, `--tldr`, `--man`, etc.
-
-> [!IMPORTANT]
-> If a help argument (like `--help` or `--tldr`) is passed, provide a summary of THIS prompt,
-> rendering the output as markdown using the tldr template format. Do NOT create a new tldr page
-> for the command.
-
-## Usage
-
-### Syntax
-
-```bash
-/create-tldr-page #fetch [text data] [context file]
-```
-
-### Error Handling
-
-#### Missing Command
-
-**User**
-
-```bash
-/create-tldr-page https://some-command.io/docs/manual.html
-```
-
-**Agent**
-
-```text
-I'll fetch the URL and analyze the documentation.
-From the data extracted, I assume the command is `some-command`. Is this correct? (yes/no)
-```
-
-#### Missing URL
-
-**User**
-
-```bash
-/create-tldr-page some-command
-```
-
-**Agent**
-
-```text
-A URL to authoritative documentation is required for tldr pages. Here are some acceptable URL
-patterns:
-
-1. https://gnu.org/software/manual/html_node/some-command.html
-2. https://some.org/serve/some.man.html#some-command
-3. https://some-command.io/docs/cli/latest/manual
-4. https://some-command.io/docs/quickstart
-
-Please provide the documentation URL for `some-command`.
-```
-
-## Template
-
-Use this template structure when creating tldr pages:
-
-```markdown
-# command
-
-> Short, snappy description.
-> Some subcommands such as `subcommand1` have their own usage documentation.
-> More information: <https://example.com>.
-
-- View documentation for creating something:
-
-`tldr command-subcommand1`
-
-- View documentation for managing something:
-
-`tldr command-subcommand2`
-```
-
-### Template Guidelines
-
-- **Title**: Use exact command name (lowercase)
-- **Description**: One-line summary of what the command does
-- **Subcommands note**: Only include if relevant
-- **More information**: Link to authoritative upstream documentation (required)
-- **Examples**: 5-8 most common use cases, ordered by frequency of use
-- **Placeholders**: Use `{{placeholder}}` syntax for user-provided values
-
-## Examples
-
-### Reference Examples
-
-You MAY fetch these example tldr pages to understand the proper format and style:
-
-* [git](https://raw.githubusercontent.com/jhauga/tldr/refs/heads/main/pages/common/git.md)
-* [distrobox-create](https://raw.githubusercontent.com/jhauga/tldr/refs/heads/main/pages/linux/distrobox-create.md)
-* [nmcli](https://raw.githubusercontent.com/jhauga/tldr/refs/heads/main/pages/linux/nmcli.md)
-
-### Expected Output Using Example
-
-**User**
-
-```bash
-/create-tldr-page #fetch https://git-scm.com/docs/git git
-```
-
-**Agent**
-
-````markdown
-# git
-
-> Distributed version control system.
-> Some subcommands such as `commit`, `add`, `branch`, `switch`, `push`, etc. have their own usage documentation.
-> More information: <https://git-scm.com/docs/git>.
-
-- Create an empty Git repository:
-
-`git init`
-
-- Clone a remote Git repository from the internet:
-
-`git clone {{https://example.com/repo.git}}`
-
-- View the status of the local repository:
-
-`git status`
-
-- Stage all changes for a commit:
-
-`git add {{[-A|--all]}}`
-
-- Commit changes to version history:
-
-`git commit {{[-m|--message]}} {{message_text}}`
-
-- Push local commits to a remote repository:
-
-`git push`
-
-- Pull any changes made to a remote:
-
-`git pull`
-
-- Reset everything the way it was in the latest commit:
-
-`git reset --hard; git clean {{[-f|--force]}}`
-````
-
-### Output Formatting Rules
-
-You MUST follow these placeholder conventions:
-
-- **Options with arguments**: When an option takes an argument, wrap BOTH the option AND its argument separately
- - Example: `minipro {{[-p|--device]}} {{chip_name}}`
- - Example: `git commit {{[-m|--message]}} {{message_text}}`
- - **DO NOT** combine them as: `minipro -p {{chip_name}}` (incorrect)
-
-- **Options without arguments**: Wrap standalone options (flags) that don't take arguments
- - Example: `minipro {{[-E|--erase]}}`
- - Example: `git add {{[-A|--all]}}`
-
-- **Single short options**: Do NOT wrap single short options when used alone without long form
- - Example: `ls -l` (not wrapped)
- - Example: `minipro -L` (not wrapped)
- - However, if both short and long forms exist, wrap them: `{{[-l|--list]}}`
-
-- **Subcommands**: Generally do NOT wrap subcommands unless they are user-provided variables
- - Example: `git init` (not wrapped)
- - Example: `tldr {{command}}` (wrapped when variable)
-
-- **Arguments and operands**: Always wrap user-provided values
- - Example: `{{device_name}}`, `{{chip_name}}`, `{{repository_url}}`
- - Example: `{{path/to/file}}` for file paths
- - Example: `{{https://example.com}}` for URLs
-
-- **Command structure**: Options should appear BEFORE their arguments in the placeholder syntax
- - Correct: `command {{[-o|--option]}} {{value}}`
- - Incorrect: `command -o {{value}}`
diff --git a/prompts/csharp-async.prompt.md b/prompts/csharp-async.prompt.md
deleted file mode 100644
index 8291c350..00000000
--- a/prompts/csharp-async.prompt.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-agent: 'agent'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'problems']
-description: 'Get best practices for C# async programming'
----
-
-# C# Async Programming Best Practices
-
-Your goal is to help me follow best practices for asynchronous programming in C#.
-
-## Naming Conventions
-
-- Use the 'Async' suffix for all async methods
-- Match method names with their synchronous counterparts when applicable (e.g., `GetDataAsync()` for `GetData()`)
-
-## Return Types
-
-- Return `Task<TResult>` when the method returns a value
-- Return `Task` when the method doesn't return a value
-- Consider `ValueTask<TResult>` for high-performance scenarios to reduce allocations
-- Avoid returning `void` for async methods except for event handlers
-
-## Exception Handling
-
-- Use try/catch blocks around await expressions
-- Avoid swallowing exceptions in async methods
-- Use `ConfigureAwait(false)` when appropriate to prevent deadlocks in library code
-- Propagate exceptions with `Task.FromException()` instead of throwing in async `Task`-returning methods (see the sketch below)
-
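-A minimal sketch of these rules in library code (the `ReadPayloadAsync` helper and its error-wrapping policy are illustrative, not prescribed):
-
-```csharp
-public static async Task<string> ReadPayloadAsync(HttpClient client, Uri uri)
-{
-    try
-    {
-        // ConfigureAwait(false) in library code avoids capturing the caller's context.
-        using var response = await client.GetAsync(uri).ConfigureAwait(false);
-        response.EnsureSuccessStatusCode();
-        return await response.Content.ReadAsStringAsync().ConfigureAwait(false);
-    }
-    catch (HttpRequestException ex)
-    {
-        // Rethrow with context instead of swallowing the failure.
-        throw new InvalidOperationException($"Failed to read payload from {uri}.", ex);
-    }
-}
-```
-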
-## Performance
-
-- Use `Task.WhenAll()` for parallel execution of multiple tasks
-- Use `Task.WhenAny()` for implementing timeouts or taking the first completed task
-- Avoid unnecessary async/await when simply passing through task results
-- Consider cancellation tokens for long-running operations (see the sketch below)
-
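-A sketch of these patterns (the `FetchAllAsync` and `WithTimeoutAsync` helpers are hypothetical names used for illustration):
-
-```csharp
-public static Task<string[]> FetchAllAsync(HttpClient client, IEnumerable<Uri> uris, CancellationToken cancellationToken)
-{
-    // Task.WhenAll starts the downloads in parallel and awaits them together.
-    // No async/await needed here when simply passing the task through.
-    var downloads = uris.Select(uri => client.GetStringAsync(uri, cancellationToken));
-    return Task.WhenAll(downloads);
-}
-
-public static async Task<T> WithTimeoutAsync<T>(Task<T> task, TimeSpan timeout)
-{
-    // Task.WhenAny returns whichever task finishes first: the work or the delay.
-    var completed = await Task.WhenAny(task, Task.Delay(timeout));
-    if (completed != task)
-        throw new TimeoutException();
-    return await task; // Propagates the result or the original exception.
-}
-```
-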
-## Common Pitfalls
-
-- Never use `.Wait()`, `.Result`, or `.GetAwaiter().GetResult()` in async code
-- Avoid mixing blocking and async code
-- Don't create async void methods (except for event handlers)
-- Always await Task-returning methods
-
-## Implementation Patterns
-
-- Implement the async command pattern for long-running operations
-- Use async streams (`IAsyncEnumerable<T>`) for processing sequences asynchronously (see the sketch below)
-- Consider the task-based asynchronous pattern (TAP) for public APIs
-
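-A sketch of an async stream (`ReadLinesAsync` is a hypothetical example):
-
-```csharp
-public static async IAsyncEnumerable<string> ReadLinesAsync(string path)
-{
-    using var reader = new StreamReader(path);
-    // Each line is yielded as it is read rather than buffered up front.
-    while (await reader.ReadLineAsync() is { } line)
-    {
-        yield return line;
-    }
-}
-
-// Consumed with await foreach:
-// await foreach (var line in ReadLinesAsync("data.txt")) { ... }
-```
-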
-When reviewing my C# code, identify these issues and suggest improvements that follow these best practices.
diff --git a/prompts/csharp-docs.prompt.md b/prompts/csharp-docs.prompt.md
deleted file mode 100644
index 23687706..00000000
--- a/prompts/csharp-docs.prompt.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-agent: 'agent'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'problems']
-description: 'Ensure that C# types are documented with XML comments and follow best practices for documentation.'
----
-
-# C# Documentation Best Practices
-
-- Public members should be documented with XML comments.
-- It is encouraged to document internal members as well, especially if they are complex or not self-explanatory.
-
-## Guidance for all APIs
-
-- Use `<summary>` to provide a brief, one-sentence description of what the type or member does. Start the summary with a present-tense, third-person verb.
-- Use `<remarks>` for additional information, which can include implementation details, usage notes, or any other relevant context.
-- Use `<see langword="..."/>` for language-specific keywords like `null`, `true`, `false`, `int`, `bool`, etc.
-- Use `<c>` for inline code snippets.
-- Use `<example>` for usage examples on how to use the member.
-  - Use `<code>` for code blocks. `<code>` tags should be placed within an `<example>` tag. Add the language of the code example using the `language` attribute, for example, `<code language="csharp">`.
-- Use `<see cref="..."/>` to reference other types or members inline (in a sentence).
-- Use `<seealso cref="..."/>` for standalone (not in a sentence) references to other types or members in the "See also" section of the online docs.
-- Use `<inheritdoc/>` to inherit documentation from base classes or interfaces.
-  - Unless there is a major behavior change, in which case you should document the differences.
-
-## Methods
-
-- Use `<param>` to describe method parameters.
-  - The description should be a noun phrase that doesn't specify the data type.
-  - Begin with an introductory article.
-  - If the parameter is a flag enum, start the description with "A bitwise combination of the enumeration values that specifies...".
-  - If the parameter is a non-flag enum, start the description with "One of the enumeration values that specifies...".
-  - If the parameter is a Boolean, the wording should be of the form "`<see langword="true"/>` to ...; otherwise, `<see langword="false"/>`.".
-  - If the parameter is an "out" parameter, the wording should be of the form "When this method returns, contains .... This parameter is treated as uninitialized.".
-- Use `<paramref name="..."/>` to reference parameter names in documentation.
-- Use `<typeparam>` to describe type parameters in generic types or methods.
-- Use `<typeparamref name="..."/>` to reference type parameters in documentation.
-- Use `<returns>` to describe what the method returns.
-  - The description should be a noun phrase that doesn't specify the data type.
-  - Begin with an introductory article.
-  - If the return type is Boolean, the wording should be of the form "`<see langword="true"/>` if ...; otherwise, `<see langword="false"/>`.".
-
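-For example, a method documented according to these rules might look like this (`TryParseDuration` is a hypothetical member used only for illustration):
-
-```csharp
-/// <summary>Converts the specified text to a <see cref="TimeSpan"/> value.</summary>
-/// <param name="text">A string that contains the duration to convert.</param>
-/// <param name="result">When this method returns, contains the parsed duration. This parameter is treated as uninitialized.</param>
-/// <returns><see langword="true"/> if <paramref name="text"/> was converted successfully; otherwise, <see langword="false"/>.</returns>
-public static bool TryParseDuration(string text, out TimeSpan result) =>
-    TimeSpan.TryParse(text, out result);
-```
-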
-## Constructors
-
-- The summary wording should be "Initializes a new instance of the `<see cref="TypeName"/>` class [or struct].".
-
-## Properties
-
-- The `<summary>` should start with:
-  - "Gets or sets..." for a read-write property.
-  - "Gets..." for a read-only property.
-  - "Gets [or sets] a value that indicates whether..." for properties that return a Boolean value.
-- Use `<value>` to describe the value of the property.
-  - The description should be a noun phrase that doesn't specify the data type.
-  - If the property has a default value, add it in a separate sentence, for example, "The default is `<see langword="true"/>`".
-  - If the value type is Boolean, the wording should be of the form "`<see langword="true"/>` if ...; otherwise, `<see langword="false"/>`. The default is ...".
-
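-For example (the `RetryEnabled` property is hypothetical):
-
-```csharp
-/// <summary>Gets or sets a value that indicates whether failed requests are retried.</summary>
-/// <value><see langword="true"/> if failed requests are retried; otherwise, <see langword="false"/>. The default is <see langword="false"/>.</value>
-public bool RetryEnabled { get; set; }
-```
-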
-## Exceptions
-
-- Use `<exception cref="...">` to document exceptions thrown by constructors, properties, indexers, methods, operators, and events.
-- Document all exceptions thrown directly by the member.
-- For exceptions thrown by nested members, document only the exceptions users are most likely to encounter.
-- The description of the exception describes the condition under which it's thrown.
- - Omit "Thrown if ..." or "If ..." at the beginning of the sentence. Just state the condition directly, for example "An error occurred when accessing a Message Queuing API."
diff --git a/prompts/csharp-mcp-server-generator.prompt.md b/prompts/csharp-mcp-server-generator.prompt.md
deleted file mode 100644
index e0218d01..00000000
--- a/prompts/csharp-mcp-server-generator.prompt.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-agent: 'agent'
-description: 'Generate a complete MCP server project in C# with tools, prompts, and proper configuration'
----
-
-# Generate C# MCP Server
-
-Create a complete Model Context Protocol (MCP) server in C# with the following specifications:
-
-## Requirements
-
-1. **Project Structure**: Create a new C# console application with proper directory structure
-2. **NuGet Packages**: Include ModelContextProtocol (prerelease) and Microsoft.Extensions.Hosting
-3. **Logging Configuration**: Configure all logs to stderr to avoid interfering with stdio transport
-4. **Server Setup**: Use the Host builder pattern with proper DI configuration
-5. **Tools**: Create at least one useful tool with proper attributes and descriptions
-6. **Error Handling**: Include proper error handling and validation
-
-## Implementation Details
-
-### Basic Project Setup
-- Use .NET 8.0 or later
-- Create a console application
-- Add necessary NuGet packages with --prerelease flag
-- Configure logging to stderr
-
-### Server Configuration
-- Use `Host.CreateApplicationBuilder` for DI and lifecycle management
-- Configure `AddMcpServer()` with stdio transport
-- Use `WithToolsFromAssembly()` for automatic tool discovery
-- Ensure the server runs with `RunAsync()`
-
-### Tool Implementation
-- Use `[McpServerToolType]` attribute on tool classes
-- Use `[McpServerTool]` attribute on tool methods
-- Add `[Description]` attributes to tools and parameters
-- Support async operations where appropriate
-- Include proper parameter validation (see the sketch below)
-
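-A minimal sketch of this wiring (assuming the API shape of the current ModelContextProtocol prerelease; verify the names against the package version you install):
-
-```csharp
-using System;
-using System.ComponentModel;
-using Microsoft.Extensions.DependencyInjection;
-using Microsoft.Extensions.Hosting;
-using Microsoft.Extensions.Logging;
-using ModelContextProtocol.Server;
-
-var builder = Host.CreateApplicationBuilder(args);
-
-// Route all logs to stderr so stdout stays reserved for the stdio transport.
-builder.Logging.AddConsole(options => options.LogToStandardErrorThreshold = LogLevel.Trace);
-
-builder.Services
-    .AddMcpServer()
-    .WithStdioServerTransport()
-    .WithToolsFromAssembly(); // Discovers [McpServerToolType] classes in this assembly.
-
-await builder.Build().RunAsync();
-
-[McpServerToolType]
-public static class EchoTool
-{
-    [McpServerTool, Description("Echoes the provided message back to the client.")]
-    public static string Echo([Description("The message to echo.")] string message) =>
-        string.IsNullOrWhiteSpace(message)
-            ? throw new ArgumentException("Message must not be empty.", nameof(message))
-            : $"Echo: {message}";
-}
-```
-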
-### Code Quality
-- Follow C# naming conventions
-- Include XML documentation comments
-- Use nullable reference types
-- Implement proper error handling with McpProtocolException
-- Use structured logging for debugging
-
-## Example Tool Types to Consider
-- File operations (read, write, search)
-- Data processing (transform, validate, analyze)
-- External API integrations (HTTP requests)
-- System operations (execute commands, check status)
-- Database operations (query, update)
-
-## Testing Guidance
-- Explain how to run the server
-- Provide example commands to test with MCP clients
-- Include troubleshooting tips
-
-Generate a complete, production-ready MCP server with comprehensive documentation and error handling.
diff --git a/prompts/csharp-mstest.prompt.md b/prompts/csharp-mstest.prompt.md
deleted file mode 100644
index 9a27bda8..00000000
--- a/prompts/csharp-mstest.prompt.md
+++ /dev/null
@@ -1,479 +0,0 @@
----
-agent: 'agent'
-tools: ['changes', 'search/codebase', 'edit/editFiles', 'problems', 'search']
-description: 'Get best practices for MSTest 3.x/4.x unit testing, including modern assertion APIs and data-driven tests'
----
-
-# MSTest Best Practices (MSTest 3.x/4.x)
-
-Your goal is to help me write effective unit tests with modern MSTest, using current APIs and best practices.
-
-## Project Setup
-
-- Use a separate test project with naming convention `[ProjectName].Tests`
-- Reference MSTest 3.x+ NuGet packages (includes analyzers)
-- Consider using MSTest.Sdk for simplified project setup
-- Run tests with `dotnet test`
-
-## Test Class Structure
-
-- Use `[TestClass]` attribute for test classes
-- **Seal test classes by default** for performance and design clarity
-- Use `[TestMethod]` for test methods (prefer over `[DataTestMethod]`)
-- Follow Arrange-Act-Assert (AAA) pattern
-- Name tests using pattern `MethodName_Scenario_ExpectedBehavior`
-
-```csharp
-[TestClass]
-public sealed class CalculatorTests
-{
- [TestMethod]
- public void Add_TwoPositiveNumbers_ReturnsSum()
- {
- // Arrange
- var calculator = new Calculator();
-
- // Act
- var result = calculator.Add(2, 3);
-
- // Assert
- Assert.AreEqual(5, result);
- }
-}
-```
-
-## Test Lifecycle
-
-- **Prefer constructors over `[TestInitialize]`** - enables `readonly` fields and follows standard C# patterns
-- Use `[TestCleanup]` for cleanup that must run even if test fails
-- Combine constructor with async `[TestInitialize]` when async setup is needed
-
-```csharp
-[TestClass]
-public sealed class ServiceTests
-{
- private readonly MyService _service; // readonly enabled by constructor
-
- public ServiceTests()
- {
- _service = new MyService();
- }
-
- [TestInitialize]
- public async Task InitAsync()
- {
- // Use for async initialization only
- await _service.WarmupAsync();
- }
-
- [TestCleanup]
- public void Cleanup() => _service.Reset();
-}
-```
-
-### Execution Order
-
-1. **Assembly Initialization** - `[AssemblyInitialize]` (once per test assembly)
-2. **Class Initialization** - `[ClassInitialize]` (once per test class)
-3. **Test Initialization** (for every test method):
- 1. Constructor
- 2. Set `TestContext` property
- 3. `[TestInitialize]`
-4. **Test Execution** - test method runs
-5. **Test Cleanup** (for every test method):
- 1. `[TestCleanup]`
- 2. `DisposeAsync` (if implemented)
- 3. `Dispose` (if implemented)
-6. **Class Cleanup** - `[ClassCleanup]` (once per test class)
-7. **Assembly Cleanup** - `[AssemblyCleanup]` (once per test assembly)
-
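-The class- and assembly-level hooks are wired up as static methods; a minimal sketch:
-
-```csharp
-[TestClass]
-public sealed class LifecycleTests
-{
-    // Set by the runner after the constructor and before [TestInitialize].
-    public TestContext TestContext { get; set; } = null!;
-
-    [ClassInitialize]
-    public static void ClassInit(TestContext context) { /* runs once per test class */ }
-
-    [ClassCleanup]
-    public static void CleanUpClass() { /* runs once per test class */ }
-}
-```
-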
-## Modern Assertion APIs
-
-MSTest provides three assertion classes: `Assert`, `StringAssert`, and `CollectionAssert`.
-
-### Assert Class - Core Assertions
-
-```csharp
-// Equality
-Assert.AreEqual(expected, actual);
-Assert.AreNotEqual(notExpected, actual);
-Assert.AreSame(expectedObject, actualObject); // Reference equality
-Assert.AreNotSame(notExpectedObject, actualObject);
-
-// Null checks
-Assert.IsNull(value);
-Assert.IsNotNull(value);
-
-// Boolean
-Assert.IsTrue(condition);
-Assert.IsFalse(condition);
-
-// Fail/Inconclusive
-Assert.Fail("Test failed due to...");
-Assert.Inconclusive("Test cannot be completed because...");
-```
-
-### Exception Testing (Prefer over `[ExpectedException]`)
-
-```csharp
-// Assert.Throws<TException> - matches TException or derived types
-var ex = Assert.Throws<ArgumentNullException>(() => Method(null));
-Assert.AreEqual("Value cannot be null.", ex.Message);
-
-// Assert.ThrowsExactly<TException> - matches the exact type only
-var ex = Assert.ThrowsExactly<InvalidOperationException>(() => Method());
-
-// Async versions
-var ex = await Assert.ThrowsAsync<HttpRequestException>(async () => await client.GetAsync(url));
-var ex = await Assert.ThrowsExactlyAsync<InvalidOperationException>(async () => await Method());
-```
-
-### Collection Assertions (Assert class)
-
-```csharp
-Assert.Contains(expectedItem, collection);
-Assert.DoesNotContain(unexpectedItem, collection);
-Assert.ContainsSingle(collection); // exactly one element
-Assert.HasCount(5, collection);
-Assert.IsEmpty(collection);
-Assert.IsNotEmpty(collection);
-```
-
-### String Assertions (Assert class)
-
-```csharp
-Assert.Contains("expected", actualString);
-Assert.StartsWith("prefix", actualString);
-Assert.EndsWith("suffix", actualString);
-Assert.DoesNotStartWith("prefix", actualString);
-Assert.DoesNotEndWith("suffix", actualString);
-Assert.MatchesRegex(@"\d{3}-\d{4}", phoneNumber);
-Assert.DoesNotMatchRegex(@"\d+", textOnly);
-```
-
-### Comparison Assertions
-
-```csharp
-Assert.IsGreaterThan(lowerBound, actual);
-Assert.IsGreaterThanOrEqualTo(lowerBound, actual);
-Assert.IsLessThan(upperBound, actual);
-Assert.IsLessThanOrEqualTo(upperBound, actual);
-Assert.IsInRange(actual, low, high);
-Assert.IsPositive(number);
-Assert.IsNegative(number);
-```
-
-### Type Assertions
-
-```csharp
-// MSTest 3.x - uses out parameter
-Assert.IsInstanceOfType<MyClass>(obj, out var typed);
-typed.DoSomething();
-
-// MSTest 4.x - returns typed result directly
-var typed = Assert.IsInstanceOfType<MyClass>(obj);
-typed.DoSomething();
-
-Assert.IsNotInstanceOfType<MyClass>(obj);
-```
-
-### Assert.That (MSTest 4.0+)
-
-```csharp
-Assert.That(result.Count > 0); // Auto-captures expression in failure message
-```
-
-### StringAssert Class
-
-> **Note:** Prefer `Assert` class equivalents when available (e.g., `Assert.Contains("expected", actual)` over `StringAssert.Contains(actual, "expected")`).
-
-```csharp
-StringAssert.Contains(actualString, "expected");
-StringAssert.StartsWith(actualString, "prefix");
-StringAssert.EndsWith(actualString, "suffix");
-StringAssert.Matches(actualString, new Regex(@"\d{3}-\d{4}"));
-StringAssert.DoesNotMatch(actualString, new Regex(@"\d+"));
-```
-
-### CollectionAssert Class
-
-> **Note:** Prefer `Assert` class equivalents when available (e.g., `Assert.Contains`).
-
-```csharp
-// Containment
-CollectionAssert.Contains(collection, expectedItem);
-CollectionAssert.DoesNotContain(collection, unexpectedItem);
-
-// Equality (same elements, same order)
-CollectionAssert.AreEqual(expectedCollection, actualCollection);
-CollectionAssert.AreNotEqual(unexpectedCollection, actualCollection);
-
-// Equivalence (same elements, any order)
-CollectionAssert.AreEquivalent(expectedCollection, actualCollection);
-CollectionAssert.AreNotEquivalent(unexpectedCollection, actualCollection);
-
-// Subset checks
-CollectionAssert.IsSubsetOf(subset, superset);
-CollectionAssert.IsNotSubsetOf(notSubset, collection);
-
-// Element validation
-CollectionAssert.AllItemsAreInstancesOfType(collection, typeof(MyClass));
-CollectionAssert.AllItemsAreNotNull(collection);
-CollectionAssert.AllItemsAreUnique(collection);
-```
-
-## Data-Driven Tests
-
-### DataRow
-
-```csharp
-[TestMethod]
-[DataRow(1, 2, 3)]
-[DataRow(0, 0, 0, DisplayName = "Zeros")]
-[DataRow(-1, 1, 0, IgnoreMessage = "Known issue #123")] // MSTest 3.8+
-public void Add_ReturnsSum(int a, int b, int expected)
-{
- Assert.AreEqual(expected, Calculator.Add(a, b));
-}
-```
-
-### DynamicData
-
-The data source can return any of the following types:
-
-- `IEnumerable<(T1, T2, ...)>` (ValueTuple) - **preferred**, provides type safety (MSTest 3.7+)
-- `IEnumerable<Tuple<T1, T2, ...>>` - provides type safety
-- `IEnumerable<TestDataRow<T>>` - provides type safety plus control over test metadata (display name, categories)
-- `IEnumerable<object[]>`