mirror of
https://github.com/github/awesome-copilot.git
synced 2026-02-24 12:25:11 +00:00
Merge branch 'staged' into main
This commit is contained in:
90
.github/plugin/marketplace.json
vendored
90
.github/plugin/marketplace.json
vendored
@@ -12,271 +12,271 @@
|
||||
"plugins": [
|
||||
{
|
||||
"name": "awesome-copilot",
|
||||
"source": "./plugins/awesome-copilot",
|
||||
"source": "awesome-copilot",
|
||||
"description": "Meta prompts that help you discover and generate curated GitHub Copilot agents, instructions, prompts, and skills.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "azure-cloud-development",
|
||||
"source": "./plugins/azure-cloud-development",
|
||||
"source": "azure-cloud-development",
|
||||
"description": "Comprehensive Azure cloud development tools including Infrastructure as Code, serverless functions, architecture patterns, and cost optimization for building scalable cloud applications.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "cast-imaging",
|
||||
"source": "./plugins/cast-imaging",
|
||||
"source": "cast-imaging",
|
||||
"description": "A comprehensive collection of specialized agents for software analysis, impact assessment, structural quality advisories, and architectural review using CAST Imaging.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "clojure-interactive-programming",
|
||||
"source": "./plugins/clojure-interactive-programming",
|
||||
"source": "clojure-interactive-programming",
|
||||
"description": "Tools for REPL-first Clojure workflows featuring Clojure instructions, the interactive programming chat mode and supporting guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "context-engineering",
|
||||
"source": "./plugins/context-engineering",
|
||||
"source": "context-engineering",
|
||||
"description": "Tools and techniques for maximizing GitHub Copilot effectiveness through better context management. Includes guidelines for structuring code, an agent for planning multi-file changes, and prompts for context-aware development.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "copilot-sdk",
|
||||
"source": "./plugins/copilot-sdk",
|
||||
"source": "copilot-sdk",
|
||||
"description": "Build applications with the GitHub Copilot SDK across multiple programming languages. Includes comprehensive instructions for C#, Go, Node.js/TypeScript, and Python to help you create AI-powered applications.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "csharp-dotnet-development",
|
||||
"source": "./plugins/csharp-dotnet-development",
|
||||
"source": "csharp-dotnet-development",
|
||||
"description": "Essential prompts, instructions, and chat modes for C# and .NET development including testing, documentation, and best practices.",
|
||||
"version": "1.1.0"
|
||||
},
|
||||
{
|
||||
"name": "csharp-mcp-development",
|
||||
"source": "./plugins/csharp-mcp-development",
|
||||
"source": "csharp-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol (MCP) servers in C# using the official SDK. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "database-data-management",
|
||||
"source": "./plugins/database-data-management",
|
||||
"source": "database-data-management",
|
||||
"description": "Database administration, SQL optimization, and data management tools for PostgreSQL, SQL Server, and general database development best practices.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "dataverse-sdk-for-python",
|
||||
"source": "./plugins/dataverse-sdk-for-python",
|
||||
"source": "dataverse-sdk-for-python",
|
||||
"description": "Comprehensive collection for building production-ready Python integrations with Microsoft Dataverse. Includes official documentation, best practices, advanced features, file operations, and code generation prompts.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "devops-oncall",
|
||||
"source": "./plugins/devops-oncall",
|
||||
"source": "devops-oncall",
|
||||
"description": "A focused set of prompts, instructions, and a chat mode to help triage incidents and respond quickly with DevOps tools and Azure resources.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "edge-ai-tasks",
|
||||
"source": "./plugins/edge-ai-tasks",
|
||||
"source": "edge-ai-tasks",
|
||||
"description": "Task Researcher and Task Planner for intermediate to expert users and large codebases - Brought to you by microsoft/edge-ai",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "frontend-web-dev",
|
||||
"source": "./plugins/frontend-web-dev",
|
||||
"source": "frontend-web-dev",
|
||||
"description": "Essential prompts, instructions, and chat modes for modern frontend web development including React, Angular, Vue, TypeScript, and CSS frameworks.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "gem-team",
|
||||
"source": "./plugins/gem-team",
|
||||
"source": "gem-team",
|
||||
"description": "A modular multi-agent team for complex project execution with DAG-based planning, parallel execution, TDD verification, and automated testing.",
|
||||
"version": "1.1.0"
|
||||
},
|
||||
{
|
||||
"name": "go-mcp-development",
|
||||
"source": "./plugins/go-mcp-development",
|
||||
"source": "go-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol (MCP) servers in Go using the official github.com/modelcontextprotocol/go-sdk. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "java-development",
|
||||
"source": "./plugins/java-development",
|
||||
"source": "java-development",
|
||||
"description": "Comprehensive collection of prompts and instructions for Java development including Spring Boot, Quarkus, testing, documentation, and best practices.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "java-mcp-development",
|
||||
"source": "./plugins/java-mcp-development",
|
||||
"source": "java-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol servers in Java using the official MCP Java SDK with reactive streams and Spring Boot integration.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "kotlin-mcp-development",
|
||||
"source": "./plugins/kotlin-mcp-development",
|
||||
"source": "kotlin-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol (MCP) servers in Kotlin using the official io.modelcontextprotocol:kotlin-sdk library. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "mcp-m365-copilot",
|
||||
"source": "./plugins/mcp-m365-copilot",
|
||||
"source": "mcp-m365-copilot",
|
||||
"description": "Comprehensive collection for building declarative agents with Model Context Protocol integration for Microsoft 365 Copilot",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "openapi-to-application-csharp-dotnet",
|
||||
"source": "./plugins/openapi-to-application-csharp-dotnet",
|
||||
"source": "openapi-to-application-csharp-dotnet",
|
||||
"description": "Generate production-ready .NET applications from OpenAPI specifications. Includes ASP.NET Core project scaffolding, controller generation, entity framework integration, and C# best practices.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "openapi-to-application-go",
|
||||
"source": "./plugins/openapi-to-application-go",
|
||||
"source": "openapi-to-application-go",
|
||||
"description": "Generate production-ready Go applications from OpenAPI specifications. Includes project scaffolding, handler generation, middleware setup, and Go best practices for REST APIs.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "openapi-to-application-java-spring-boot",
|
||||
"source": "./plugins/openapi-to-application-java-spring-boot",
|
||||
"source": "openapi-to-application-java-spring-boot",
|
||||
"description": "Generate production-ready Spring Boot applications from OpenAPI specifications. Includes project scaffolding, REST controller generation, service layer organization, and Spring Boot best practices.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "openapi-to-application-nodejs-nestjs",
|
||||
"source": "./plugins/openapi-to-application-nodejs-nestjs",
|
||||
"source": "openapi-to-application-nodejs-nestjs",
|
||||
"description": "Generate production-ready NestJS applications from OpenAPI specifications. Includes project scaffolding, controller and service generation, TypeScript best practices, and enterprise patterns.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "openapi-to-application-python-fastapi",
|
||||
"source": "./plugins/openapi-to-application-python-fastapi",
|
||||
"source": "openapi-to-application-python-fastapi",
|
||||
"description": "Generate production-ready FastAPI applications from OpenAPI specifications. Includes project scaffolding, route generation, dependency injection, and Python best practices for async APIs.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "ospo-sponsorship",
|
||||
"source": "./plugins/ospo-sponsorship",
|
||||
"source": "ospo-sponsorship",
|
||||
"description": "Tools and resources for Open Source Program Offices (OSPOs) to identify, evaluate, and manage sponsorship of open source dependencies through GitHub Sponsors, Open Collective, and other funding platforms.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "partners",
|
||||
"source": "./plugins/partners",
|
||||
"source": "partners",
|
||||
"description": "Custom agents that have been created by GitHub partners",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "pcf-development",
|
||||
"source": "./plugins/pcf-development",
|
||||
"source": "pcf-development",
|
||||
"description": "Complete toolkit for developing custom code components using Power Apps Component Framework for model-driven and canvas apps",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "php-mcp-development",
|
||||
"source": "./plugins/php-mcp-development",
|
||||
"source": "php-mcp-development",
|
||||
"description": "Comprehensive resources for building Model Context Protocol servers using the official PHP SDK with attribute-based discovery, including best practices, project generation, and expert assistance",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "polyglot-test-agent",
|
||||
"source": "./plugins/polyglot-test-agent",
|
||||
"source": "polyglot-test-agent",
|
||||
"description": "Multi-agent pipeline for generating comprehensive unit tests across any programming language. Orchestrates research, planning, and implementation phases using specialized agents to produce tests that compile, pass, and follow project conventions.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "power-apps-code-apps",
|
||||
"source": "./plugins/power-apps-code-apps",
|
||||
"source": "power-apps-code-apps",
|
||||
"description": "Complete toolkit for Power Apps Code Apps development including project scaffolding, development standards, and expert guidance for building code-first applications with Power Platform integration.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "power-bi-development",
|
||||
"source": "./plugins/power-bi-development",
|
||||
"source": "power-bi-development",
|
||||
"description": "Comprehensive Power BI development resources including data modeling, DAX optimization, performance tuning, visualization design, security best practices, and DevOps/ALM guidance for building enterprise-grade Power BI solutions.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "power-platform-mcp-connector-development",
|
||||
"source": "./plugins/power-platform-mcp-connector-development",
|
||||
"source": "power-platform-mcp-connector-development",
|
||||
"description": "Complete toolkit for developing Power Platform custom connectors with Model Context Protocol integration for Microsoft Copilot Studio",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "project-planning",
|
||||
"source": "./plugins/project-planning",
|
||||
"source": "project-planning",
|
||||
"description": "Tools and guidance for software project planning, feature breakdown, epic management, implementation planning, and task organization for development teams.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "python-mcp-development",
|
||||
"source": "./plugins/python-mcp-development",
|
||||
"source": "python-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol (MCP) servers in Python using the official SDK with FastMCP. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "ruby-mcp-development",
|
||||
"source": "./plugins/ruby-mcp-development",
|
||||
"source": "ruby-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol servers in Ruby using the official MCP Ruby SDK gem with Rails integration support.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "rug-agentic-workflow",
|
||||
"source": "./plugins/rug-agentic-workflow",
|
||||
"source": "rug-agentic-workflow",
|
||||
"description": "Three-agent workflow for orchestrated software delivery with an orchestrator plus implementation and QA subagents.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "rust-mcp-development",
|
||||
"source": "./plugins/rust-mcp-development",
|
||||
"source": "rust-mcp-development",
|
||||
"description": "Build high-performance Model Context Protocol servers in Rust using the official rmcp SDK with async/await, procedural macros, and type-safe implementations.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "security-best-practices",
|
||||
"source": "./plugins/security-best-practices",
|
||||
"source": "security-best-practices",
|
||||
"description": "Security frameworks, accessibility guidelines, performance optimization, and code quality best practices for building secure, maintainable, and high-performance applications.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "software-engineering-team",
|
||||
"source": "./plugins/software-engineering-team",
|
||||
"source": "software-engineering-team",
|
||||
"description": "7 specialized agents covering the full software development lifecycle from UX design and architecture to security and DevOps.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "structured-autonomy",
|
||||
"source": "./plugins/structured-autonomy",
|
||||
"source": "structured-autonomy",
|
||||
"description": "Premium planning, thrifty implementation",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "swift-mcp-development",
|
||||
"source": "./plugins/swift-mcp-development",
|
||||
"source": "swift-mcp-development",
|
||||
"description": "Comprehensive collection for building Model Context Protocol servers in Swift using the official MCP Swift SDK with modern concurrency features.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "technical-spike",
|
||||
"source": "./plugins/technical-spike",
|
||||
"source": "technical-spike",
|
||||
"description": "Tools for creation, management and research of technical spikes to reduce unknowns and assumptions before proceeding to specification and implementation of solutions.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "testing-automation",
|
||||
"source": "./plugins/testing-automation",
|
||||
"source": "testing-automation",
|
||||
"description": "Comprehensive collection for writing tests, test automation, and test-driven development including unit tests, integration tests, and end-to-end testing strategies.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "typescript-mcp-development",
|
||||
"source": "./plugins/typescript-mcp-development",
|
||||
"source": "typescript-mcp-development",
|
||||
"description": "Complete toolkit for building Model Context Protocol (MCP) servers in TypeScript/Node.js using the official SDK. Includes instructions for best practices, a prompt for generating servers, and an expert chat mode for guidance.",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"name": "typespec-m365-copilot",
|
||||
"source": "./plugins/typespec-m365-copilot",
|
||||
"source": "typespec-m365-copilot",
|
||||
"description": "Comprehensive collection of prompts, instructions, and resources for building declarative agents and API plugins using TypeSpec for Microsoft 365 Copilot extensibility.",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
|
||||
@@ -54,6 +54,7 @@ Team and project-specific instructions to enhance GitHub Copilot's behavior for
|
||||
| [Comprehensive Guide: Converting Spring Boot Cassandra Applications to use Azure Cosmos DB with Spring Data Cosmos (spring-data-cosmos)](../instructions/convert-cassandra-to-spring-data-cosmos.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fconvert-cassandra-to-spring-data-cosmos.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fconvert-cassandra-to-spring-data-cosmos.instructions.md) | Step-by-step guide for converting Spring Boot Cassandra applications to use Azure Cosmos DB with Spring Data Cosmos |
|
||||
| [Containerization & Docker Best Practices](../instructions/containerization-docker-best-practices.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontainerization-docker-best-practices.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontainerization-docker-best-practices.instructions.md) | Comprehensive best practices for creating optimized, secure, and efficient Docker images and managing containers. Covers multi-stage builds, image layer optimization, security scanning, and runtime best practices. |
|
||||
| [Context Engineering](../instructions/context-engineering.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontext-engineering.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontext-engineering.instructions.md) | Guidelines for structuring code and projects to maximize GitHub Copilot effectiveness through better context management |
|
||||
| [Context7-aware development](../instructions/context7.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontext7.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcontext7.instructions.md) | Use Context7 for authoritative external docs and API references when local context is insufficient |
|
||||
| [Convert Spring JPA project to Spring Data Cosmos](../instructions/convert-jpa-to-spring-data-cosmos.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fconvert-jpa-to-spring-data-cosmos.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fconvert-jpa-to-spring-data-cosmos.instructions.md) | Step-by-step guide for converting Spring Boot JPA applications to use Azure Cosmos DB with Spring Data Cosmos |
|
||||
| [Copilot Process tracking Instructions](../instructions/copilot-thought-logging.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcopilot-thought-logging.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fcopilot-thought-logging.instructions.md) | See process Copilot is following where you can edit this to reshape the interaction or save when follow up may be needed |
|
||||
| [Copilot Prompt Files Guidelines](../instructions/prompt.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fprompt.instructions.md)<br />[](https://aka.ms/awesome-copilot/install/instructions?url=vscode-insiders%3Achat-instructions%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Finstructions%2Fprompt.instructions.md) | Guidelines for creating high-quality prompt files for GitHub Copilot |
|
||||
|
||||
@@ -31,6 +31,7 @@ Ready-to-use prompt templates for specific development scenarios and tasks, defi
|
||||
| [Azure Cosmos DB NoSQL Data Modeling Expert System Prompt](../prompts/cosmosdb-datamodeling.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcosmosdb-datamodeling.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcosmosdb-datamodeling.prompt.md) | Step-by-step guide for capturing key application requirements for NoSQL use-case and produce Azure Cosmos DB Data NoSQL Model design using best practices and common patterns, artifacts_produced: "cosmosdb_requirements.md" file and "cosmosdb_data_model.md" file |
|
||||
| [Azure Cost Optimize](../prompts/az-cost-optimize.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faz-cost-optimize.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Faz-cost-optimize.prompt.md) | Analyze Azure resources used in the app (IaC files and/or resources in a target rg) and optimize costs - creating GitHub issues for identified optimizations. |
|
||||
| [Azure Resource Health & Issue Diagnosis](../prompts/azure-resource-health-diagnose.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fazure-resource-health-diagnose.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fazure-resource-health-diagnose.prompt.md) | Analyze Azure resource health, diagnose issues from logs and telemetry, and create a remediation plan for identified problems. |
|
||||
| [BigQuery Pipeline Audit: Cost, Safety and Production Readiness](../prompts/bigquery-pipeline-audit.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbigquery-pipeline-audit.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fbigquery-pipeline-audit.prompt.md) | Audits Python + BigQuery pipelines for cost safety, idempotency, and production readiness. Returns a structured report with exact patch locations. |
|
||||
| [Boost Prompt](../prompts/boost-prompt.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fboost-prompt.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fboost-prompt.prompt.md) | Interactive prompt refinement workflow: interrogates scope, deliverables, constraints; copies final markdown to clipboard; never writes code. Requires the Joyride extension. |
|
||||
| [C# Async Programming Best Practices](../prompts/csharp-async.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-async.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-async.prompt.md) | Get best practices for C# async programming |
|
||||
| [C# Documentation Best Practices](../prompts/csharp-docs.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-docs.prompt.md)<br />[](https://aka.ms/awesome-copilot/install/prompt?url=vscode-insiders%3Achat-prompt%2Finstall%3Furl%3Dhttps%3A%2F%2Fraw.githubusercontent.com%2Fgithub%2Fawesome-copilot%2Fmain%2Fprompts%2Fcsharp-docs.prompt.md) | Ensure that C# types are documented with XML comments and follow best practices for documentation. |
|
||||
|
||||
62
eng/clean-materialized-plugins.mjs
Normal file
62
eng/clean-materialized-plugins.mjs
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import fs from "fs";
|
||||
import path from "path";
|
||||
import { ROOT_FOLDER } from "./constants.mjs";
|
||||
|
||||
const PLUGINS_DIR = path.join(ROOT_FOLDER, "plugins");
|
||||
const MATERIALIZED_DIRS = ["agents", "commands", "skills"];
|
||||
|
||||
/**
 * Delete the materialized output directories (those listed in
 * MATERIALIZED_DIRS, e.g. agents/commands/skills) from a single plugin
 * directory, logging each removal.
 *
 * @param {string} pluginPath - Absolute path to one plugin directory.
 * @returns {number} Total number of files that were deleted.
 */
function cleanPlugin(pluginPath) {
  let filesDeleted = 0;
  MATERIALIZED_DIRS.forEach((subdir) => {
    const candidate = path.join(pluginPath, subdir);
    // Only act on an existing directory; plain files with the same name are left alone.
    const isDir = fs.existsSync(candidate) && fs.statSync(candidate).isDirectory();
    if (!isDir) {
      return;
    }
    // Count before removal so the log line can report how much was deleted.
    const fileCount = countFiles(candidate);
    fs.rmSync(candidate, { recursive: true, force: true });
    filesDeleted += fileCount;
    console.log(`  Removed ${path.basename(pluginPath)}/${subdir}/ (${fileCount} files)`);
  });
  return filesDeleted;
}
|
||||
|
||||
/**
 * Recursively count the non-directory entries under `dir`.
 * Directories themselves are not counted — only the entries inside them
 * (files, symlinks, etc., i.e. anything that is not itself a directory).
 *
 * @param {string} dir - Directory to walk.
 * @returns {number} Number of non-directory entries found.
 */
function countFiles(dir) {
  const entries = fs.readdirSync(dir, { withFileTypes: true });
  let total = 0;
  for (const entry of entries) {
    total += entry.isDirectory()
      ? countFiles(path.join(dir, entry.name))
      : 1;
  }
  return total;
}
|
||||
|
||||
/**
 * Entry point: walk every plugin directory under PLUGINS_DIR, strip the
 * materialized agent/command/skill files from each via cleanPlugin, and
 * print a summary of how many files were removed in total.
 * Exits with status 1 if the plugins directory does not exist.
 */
function main() {
  console.log("Cleaning materialized files from plugins...\n");

  if (!fs.existsSync(PLUGINS_DIR)) {
    console.error(`Error: plugins directory not found at ${PLUGINS_DIR}`);
    process.exit(1);
  }

  // Sorted directory names so removal logs are deterministic across runs.
  const names = fs
    .readdirSync(PLUGINS_DIR, { withFileTypes: true })
    .filter((entry) => entry.isDirectory())
    .map((entry) => entry.name)
    .sort();

  let removedTotal = 0;
  names.forEach((name) => {
    removedTotal += cleanPlugin(path.join(PLUGINS_DIR, name));
  });

  console.log();
  const summary =
    removedTotal === 0
      ? "✅ No materialized files found. Plugins are already clean."
      : `✅ Removed ${removedTotal} materialized file(s) from plugins.`;
  console.log(summary);
}
|
||||
|
||||
main();
|
||||
@@ -14,12 +14,12 @@ const MARKETPLACE_FILE = path.join(ROOT_FOLDER, ".github/plugin", "marketplace.j
|
||||
*/
|
||||
function readPluginMetadata(pluginDir) {
|
||||
const pluginJsonPath = path.join(pluginDir, ".github/plugin", "plugin.json");
|
||||
|
||||
|
||||
if (!fs.existsSync(pluginJsonPath)) {
|
||||
console.warn(`Warning: No plugin.json found for ${path.basename(pluginDir)}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
try {
|
||||
const content = fs.readFileSync(pluginJsonPath, "utf8");
|
||||
return JSON.parse(content);
|
||||
@@ -34,30 +34,30 @@ function readPluginMetadata(pluginDir) {
|
||||
*/
|
||||
function generateMarketplace() {
|
||||
console.log("Generating marketplace.json...");
|
||||
|
||||
|
||||
if (!fs.existsSync(PLUGINS_DIR)) {
|
||||
console.error(`Error: Plugins directory not found at ${PLUGINS_DIR}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
|
||||
// Read all plugin directories
|
||||
const pluginDirs = fs.readdirSync(PLUGINS_DIR, { withFileTypes: true })
|
||||
.filter(entry => entry.isDirectory())
|
||||
.map(entry => entry.name)
|
||||
.sort();
|
||||
|
||||
|
||||
console.log(`Found ${pluginDirs.length} plugin directories`);
|
||||
|
||||
|
||||
// Read metadata for each plugin
|
||||
const plugins = [];
|
||||
for (const dirName of pluginDirs) {
|
||||
const pluginPath = path.join(PLUGINS_DIR, dirName);
|
||||
const metadata = readPluginMetadata(pluginPath);
|
||||
|
||||
|
||||
if (metadata) {
|
||||
plugins.push({
|
||||
name: metadata.name,
|
||||
source: `./plugins/${dirName}`,
|
||||
source: dirName,
|
||||
description: metadata.description,
|
||||
version: metadata.version || "1.0.0"
|
||||
});
|
||||
@@ -66,7 +66,7 @@ function generateMarketplace() {
|
||||
console.log(`✗ Skipped: ${dirName} (no valid plugin.json)`);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Create marketplace.json structure
|
||||
const marketplace = {
|
||||
name: "awesome-copilot",
|
||||
@@ -81,16 +81,16 @@ function generateMarketplace() {
|
||||
},
|
||||
plugins: plugins
|
||||
};
|
||||
|
||||
|
||||
// Ensure directory exists
|
||||
const marketplaceDir = path.dirname(MARKETPLACE_FILE);
|
||||
if (!fs.existsSync(marketplaceDir)) {
|
||||
fs.mkdirSync(marketplaceDir, { recursive: true });
|
||||
}
|
||||
|
||||
|
||||
// Write marketplace.json
|
||||
fs.writeFileSync(MARKETPLACE_FILE, JSON.stringify(marketplace, null, 2) + "\n");
|
||||
|
||||
|
||||
console.log(`\n✓ Successfully generated marketplace.json with ${plugins.length} plugins`);
|
||||
console.log(` Location: ${MARKETPLACE_FILE}`);
|
||||
}
|
||||
|
||||
106
instructions/context7.instructions.md
Normal file
106
instructions/context7.instructions.md
Normal file
@@ -0,0 +1,106 @@
|
||||
---
|
||||
description: 'Use Context7 for authoritative external docs and API references when local context is insufficient'
|
||||
applyTo: '**'
|
||||
---
|
||||
|
||||
# Context7-aware development
|
||||
|
||||
Use Context7 proactively whenever the task depends on **authoritative, current, version-specific external documentation** that is not present in the workspace context.
|
||||
|
||||
This instruction exists so you **do not require the user to type** “use context7” to get up-to-date docs.
|
||||
|
||||
## When to use Context7
|
||||
|
||||
Use Context7 before making decisions or writing code when you need any of the following:
|
||||
|
||||
- **Framework/library API details** (method signatures, configuration keys, expected behaviors).
|
||||
- **Version-sensitive guidance** (breaking changes, deprecations, new defaults).
|
||||
- **Correctness or security-critical patterns** (auth flows, crypto usage, deserialization rules).
|
||||
- **Interpreting unfamiliar error messages** that likely come from third-party tools.
|
||||
- **Best-practice implementation constraints** (rate limits, quotas, required headers, supported formats).
|
||||
|
||||
Also use Context7 when:
|
||||
|
||||
- The user references **a specific framework/library version** (e.g., “Next.js 15”, “React 19”, “AWS SDK v3”).
|
||||
- You’re about to recommend **non-trivial configuration** (CLI flags, config files, auth flows).
|
||||
- You’re unsure whether an API exists, changed names, or got deprecated.
|
||||
|
||||
Skip Context7 for:
|
||||
|
||||
- Purely local refactors, formatting, naming, or logic that is fully derivable from the repo.
|
||||
- Language fundamentals (no external APIs involved).
|
||||
|
||||
## What to fetch
|
||||
|
||||
When using Context7, prefer **primary sources** and narrow queries:
|
||||
|
||||
- Official docs (vendor/framework documentation)
|
||||
- Reference/API pages
|
||||
- Release notes / migration guides
|
||||
- Security advisories (when relevant)
|
||||
|
||||
Gather only what you need to proceed. If multiple candidates exist, pick the most authoritative/current.
|
||||
|
||||
Prefer fetching:
|
||||
|
||||
- The exact method/type/option you will use
|
||||
- The minimal surrounding context needed to avoid misuse (constraints, default behaviors, migration notes)
|
||||
|
||||
## How to incorporate results
|
||||
|
||||
- Translate findings into concrete code/config changes.
|
||||
- **Cite sources** with title + URL when the decision relies on external facts.
|
||||
- If docs conflict or are ambiguous, present the tradeoffs briefly and choose the safest default.
|
||||
|
||||
When the answer requires specific values (flags, config keys, headers), prefer:
|
||||
|
||||
- stating the exact value from docs
|
||||
- calling out defaults and caveats
|
||||
- providing a quick validation step (e.g., “run `--help`”, or a minimal smoke test)
|
||||
|
||||
## How to use Context7 MCP tools (auto)
|
||||
|
||||
When Context7 is available as an MCP server, use it automatically as follows.
|
||||
|
||||
### Tool workflow
|
||||
|
||||
1) **If the user provides a library ID**, use it directly.
|
||||
- Valid forms: `/owner/repo` or `/owner/repo/version` (for pinned versions).
|
||||
|
||||
2) Otherwise, **resolve the library ID** using:
|
||||
- Tool: `resolve-library-id`
|
||||
- Inputs:
|
||||
- `libraryName`: the library/framework name (e.g., “next.js”, “supabase”, “prisma”)
|
||||
- `query`: the user’s task (used to rank matches)
|
||||
|
||||
3) **Fetch relevant documentation** using:
|
||||
- Tool: `query-docs`
|
||||
- Inputs:
|
||||
- `libraryId`: the resolved (or user-supplied) library ID
|
||||
- `query`: the exact task/question you are answering
|
||||
|
||||
4) Only after docs are retrieved: **write the code/steps** based on those docs.
|
||||
|
||||
### Efficiency limits
|
||||
|
||||
- Do **not** call `resolve-library-id` more than **3 times** per user question.
|
||||
- Do **not** call `query-docs` more than **3 times** per user question.
|
||||
- If multiple good matches exist, pick the best one and proceed; ask a clarification question only when the choice materially affects the implementation.
|
||||
|
||||
### Version behavior
|
||||
|
||||
- If the user names a version, reflect it in the library ID when possible (e.g., `/vercel/next.js/v15.1.8`).
|
||||
- If you need reproducibility (CI/builds), prefer pinning to a specific version in examples.
|
||||
|
||||
## Failure handling
|
||||
|
||||
If Context7 cannot find a reliable source:
|
||||
|
||||
1. Say what you tried to verify.
|
||||
2. Proceed with a conservative, well-labeled assumption.
|
||||
3. Suggest a quick validation step (e.g., run a command, check a file, or consult a specific official page).
|
||||
|
||||
## Security & privacy
|
||||
|
||||
- Never request or echo API keys. If configuration requires a key, instruct storing it in environment variables.
|
||||
- Treat retrieved docs as **helpful but not infallible**; for security-sensitive code, prefer official vendor docs and add an explicit verification step.
|
||||
@@ -15,6 +15,7 @@
|
||||
"plugin:create": "node ./eng/create-plugin.mjs",
|
||||
"skill:validate": "node ./eng/validate-skills.mjs",
|
||||
"skill:create": "node ./eng/create-skill.mjs",
|
||||
"plugin:clean": "node ./eng/clean-materialized-plugins.mjs",
|
||||
"plugin:generate-marketplace": "node ./eng/generate-marketplace.mjs",
|
||||
"website:data": "node ./eng/generate-website-data.mjs",
|
||||
"website:dev": "npm run website:data && npm run --prefix website dev",
|
||||
|
||||
130
prompts/bigquery-pipeline-audit.prompt.md
Normal file
130
prompts/bigquery-pipeline-audit.prompt.md
Normal file
@@ -0,0 +1,130 @@
|
||||
---
|
||||
agent: 'agent'
|
||||
tools: ['search/codebase', 'edit/editFiles', 'search']
|
||||
description: 'Audits Python + BigQuery pipelines for cost safety, idempotency, and production readiness. Returns a structured report with exact patch locations.'
|
||||
---
|
||||
|
||||
# BigQuery Pipeline Audit: Cost, Safety and Production Readiness
|
||||
|
||||
You are a senior data engineer reviewing a Python + BigQuery pipeline script.
|
||||
Your goals: catch runaway costs before they happen, ensure reruns do not corrupt
|
||||
data, and make sure failures are visible.
|
||||
|
||||
Analyze the codebase and respond in the structure below (A to F + Final).
|
||||
Reference exact function names and line locations. Suggest minimal fixes, not
|
||||
rewrites.
|
||||
|
||||
---
|
||||
|
||||
## A) COST EXPOSURE: What will actually get billed?
|
||||
|
||||
Locate every BigQuery job trigger (`client.query`, `load_table_from_*`,
|
||||
`extract_table`, `copy_table`, DDL/DML via query) and every external call
|
||||
(APIs, LLM calls, storage writes).
|
||||
|
||||
For each, answer:
|
||||
- Is this inside a loop, retry block, or async gather?
|
||||
- What is the realistic worst-case call count?
|
||||
- For each `client.query`, is `QueryJobConfig.maximum_bytes_billed` set?
|
||||
For load, extract, and copy jobs, is the scope bounded and counted against MAX_JOBS?
|
||||
- Is the same SQL and params being executed more than once in a single run?
|
||||
Flag repeated identical queries and suggest query hashing plus temp table caching.
|
||||
|
||||
**Flag immediately if:**
|
||||
- Any BQ query runs once per date or once per entity in a loop
|
||||
- Worst-case BQ job count exceeds 20
|
||||
- `maximum_bytes_billed` is missing on any `client.query` call
|
||||
|
||||
---
|
||||
|
||||
## B) DRY RUN AND EXECUTION MODES
|
||||
|
||||
Verify a `--mode` flag exists with at least `dry_run` and `execute` options.
|
||||
|
||||
- `dry_run` must print the plan and estimated scope with zero billed BQ execution
|
||||
(BigQuery dry-run estimation via job config is allowed) and zero external API or LLM calls
|
||||
- `execute` requires explicit confirmation for prod (`--env=prod --confirm`)
|
||||
- Prod must not be the default environment
|
||||
|
||||
If missing, propose a minimal `argparse` patch with safe defaults.
|
||||
|
||||
---
|
||||
|
||||
## C) BACKFILL AND LOOP DESIGN
|
||||
|
||||
**Hard fail if:** the script runs one BQ query per date or per entity in a loop.
|
||||
|
||||
Check that date-range backfills use one of:
|
||||
1. A single set-based query with `GENERATE_DATE_ARRAY`
|
||||
2. A staging table loaded with all dates then one join query
|
||||
3. Explicit chunks with a hard `MAX_CHUNKS` cap
|
||||
|
||||
Also check:
|
||||
- Is the date range bounded by default (suggest 14 days max without `--override`)?
|
||||
- If the script crashes mid-run, is it safe to re-run without double-writing?
|
||||
- For backdated simulations, verify data is read from time-consistent snapshots
|
||||
(`FOR SYSTEM_TIME AS OF`, partitioned as-of tables, or dated snapshot tables).
|
||||
Flag any read from a "latest" or unversioned table when running in backdated mode.
|
||||
|
||||
Suggest a concrete rewrite if the current approach is row-by-row.
|
||||
|
||||
---
|
||||
|
||||
## D) QUERY SAFETY AND SCAN SIZE
|
||||
|
||||
For each query, check:
|
||||
- **Partition filter** is on the raw column, not `DATE(ts)`, `CAST(...)`, or
|
||||
any function that prevents pruning
|
||||
- **No `SELECT *`**: only columns actually used downstream
|
||||
- **Joins will not explode**: verify join keys are unique or appropriately scoped
|
||||
and flag any potential many-to-many
|
||||
- **Expensive operations** (`REGEXP`, `JSON_EXTRACT`, UDFs) only run after
|
||||
partition filtering, not on full table scans
|
||||
|
||||
Provide a specific SQL fix for any query that fails these checks.
|
||||
|
||||
---
|
||||
|
||||
## E) SAFE WRITES AND IDEMPOTENCY
|
||||
|
||||
Identify every write operation. Flag plain `INSERT`/append with no dedup logic.
|
||||
|
||||
Each write should use one of:
|
||||
1. `MERGE` on a deterministic key (e.g., `entity_id + date + model_version`)
|
||||
2. Write to a staging table scoped to the run, then swap or merge into final
|
||||
3. Append-only with a dedupe view:
|
||||
`QUALIFY ROW_NUMBER() OVER (PARTITION BY <key>) = 1`
|
||||
|
||||
Also check:
|
||||
- Will a re-run create duplicate rows?
|
||||
- Is the write disposition (`WRITE_TRUNCATE` vs `WRITE_APPEND`) intentional
|
||||
and documented?
|
||||
- Is `run_id` being used as part of the merge or dedupe key? If so, flag it.
|
||||
`run_id` should be stored as a metadata column, not as part of the uniqueness
|
||||
key, unless you explicitly want multi-run history.
|
||||
|
||||
State the recommended approach and the exact dedup key for this codebase.
|
||||
|
||||
---
|
||||
|
||||
## F) OBSERVABILITY: Can you debug a failure?
|
||||
|
||||
Verify:
|
||||
- Failures raise exceptions and abort with no silent `except: pass` or warn-only
|
||||
- Each BQ job logs: job ID, bytes processed or billed when available,
|
||||
slot milliseconds, and duration
|
||||
- A run summary is logged or written at the end containing:
|
||||
`run_id, env, mode, date_range, tables written, total BQ jobs, total bytes`
|
||||
- `run_id` is present and consistent across all log lines
|
||||
|
||||
If `run_id` is missing, propose a one-line fix:
|
||||
`run_id = run_id or datetime.utcnow().strftime('%Y%m%dT%H%M%S')`
|
||||
|
||||
---
|
||||
|
||||
## Final
|
||||
|
||||
**1. PASS / FAIL** with specific reasons per section (A to F).
|
||||
**2. Patch list** ordered by risk, referencing exact functions to change.
|
||||
**3. If FAIL: Top 3 cost risks** with a rough worst-case estimate
|
||||
(e.g., "loop over 90 dates x 3 retries = 270 BQ jobs").
|
||||
Reference in New Issue
Block a user