feat: L'Ami Fiduciaire V1.0.0 — full codebase with Story 0.1 complete

Initial commit of the L'Ami Fiduciaire SaaS platform built on Laravel 12,
Vue 3, Inertia.js 2, and Tailwind CSS 4.

Story 0.1 (rename `folders` to `declarations` in the database) is implemented
and code-reviewed: migration, rollback, and all 6 Pest tests passing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
commit 35545c2a8f
Date: 2026-03-11 23:33:10 +00:00

1517 changed files with 246774 additions and 0 deletions

@@ -0,0 +1,155 @@
# Azure DevOps CI/CD Pipeline for Test Execution
# Generated by BMad TEA Agent - Test Architect Module
# Optimized for: Parallel Sharding, Burn-In Loop
# Stack: {test_stack_type} | Framework: {test_framework}
#
# Variables to customize per project:
# INSTALL_CMD - dependency install command (e.g., npm ci, pnpm install --frozen-lockfile)
# TEST_CMD - main test command (e.g., npm run test:e2e, npm test, npx vitest)
# LINT_CMD - lint command (e.g., npm run lint)
# BROWSER_INSTALL - browser install command (frontend/fullstack only; omit for backend)
# DEFAULT_NODE_VERSION - Node.js version (read from .nvmrc or default to 24)
trigger:
  branches:
    include:
      - main
      - develop

pr:
  branches:
    include:
      - main
      - develop

schedules:
  # Weekly burn-in on Sundays at 2 AM UTC (referenced by the BurnIn stage condition)
  - cron: "0 2 * * 0"
    displayName: Weekly burn-in
    branches:
      include:
        - main
    always: true

variables:
  DEFAULT_NODE_VERSION: "24"
  npm_config_cache: $(Pipeline.Workspace)/.npm
  # Set TEST_STACK_TYPE to 'backend' to skip Playwright browser installs
  TEST_STACK_TYPE: "" # Values: frontend, backend, fullstack (empty behaves like frontend/fullstack: browsers installed)

stages:
  # Lint stage - Code quality checks
  - stage: Lint
    displayName: "Lint"
    jobs:
      - job: LintJob
        displayName: "Code Quality"
        pool:
          vmImage: "ubuntu-latest"
        timeoutInMinutes: 5
        steps:
          - task: NodeTool@0
            inputs:
              versionSpec: $(DEFAULT_NODE_VERSION)
            displayName: "Setup Node.js"
          - task: Cache@2
            inputs:
              key: 'npm | "$(Agent.OS)" | package-lock.json'
              restoreKeys: 'npm | "$(Agent.OS)"'
              path: $(npm_config_cache)
            displayName: "Cache npm"
          - script: npm ci
            displayName: "Install dependencies" # Replace with INSTALL_CMD
          - script: npm run lint
            displayName: "Run linter" # Replace with LINT_CMD

  # Test stage - Parallel execution with sharding
  - stage: Test
    displayName: "Test"
    dependsOn: Lint
    jobs:
      - job: TestShard
        displayName: "Test Shard"
        pool:
          vmImage: "ubuntu-latest"
        timeoutInMinutes: 30
        strategy:
          matrix:
            Shard1:
              SHARD_INDEX: 1
            Shard2:
              SHARD_INDEX: 2
            Shard3:
              SHARD_INDEX: 3
            Shard4:
              SHARD_INDEX: 4
        steps:
          - task: NodeTool@0
            inputs:
              versionSpec: $(DEFAULT_NODE_VERSION)
            displayName: "Setup Node.js"
          - task: Cache@2
            inputs:
              key: 'npm | "$(Agent.OS)" | package-lock.json'
              restoreKeys: 'npm | "$(Agent.OS)"'
              path: $(npm_config_cache)
            displayName: "Cache npm"
          - script: npm ci
            displayName: "Install dependencies" # Replace with INSTALL_CMD
          # Frontend/Fullstack only — skipped for backend-only stacks
          - script: npx playwright install --with-deps chromium
            condition: ne(variables['TEST_STACK_TYPE'], 'backend')
            displayName: "Install Playwright browsers" # Replace with BROWSER_INSTALL
          - script: npm run test:e2e -- --shard=$(SHARD_INDEX)/4
            displayName: "Run tests (shard $(SHARD_INDEX)/4)" # Replace with TEST_CMD + shard args
          - task: PublishTestResults@2
            condition: always()
            inputs:
              testResultsFormat: "JUnit"
              testResultsFiles: "test-results/**/*.xml"
              mergeTestResults: true
            displayName: "Publish test results"
          - publish: test-results/
            artifact: test-results-$(SHARD_INDEX)
            condition: failed()
            displayName: "Upload failure artifacts"

  # Burn-in stage - Flaky test detection
  # Note: Burn-in targets UI flakiness. For backend-only stacks, remove this stage entirely.
  - stage: BurnIn
    displayName: "Burn-In (Flaky Detection)"
    dependsOn: Test
    # Run on PR validation builds and on the weekly schedule defined above
    condition: and(succeeded(), or(eq(variables['Build.Reason'], 'PullRequest'), eq(variables['Build.Reason'], 'Schedule')))
    jobs:
      - job: BurnInJob
        displayName: "Burn-In Loop"
        pool:
          vmImage: "ubuntu-latest"
        timeoutInMinutes: 60
        steps:
          - task: NodeTool@0
            inputs:
              versionSpec: $(DEFAULT_NODE_VERSION)
            displayName: "Setup Node.js"
          - script: npm ci
            displayName: "Install dependencies" # Replace with INSTALL_CMD
          # Frontend/Fullstack only — skipped for backend-only stacks
          - script: npx playwright install --with-deps chromium
            condition: ne(variables['TEST_STACK_TYPE'], 'backend')
            displayName: "Install Playwright browsers" # Replace with BROWSER_INSTALL
          - script: |
              echo "Starting burn-in loop - detecting flaky tests"
              for i in $(seq 1 10); do
                echo "Burn-in iteration $i/10"
                npm run test:e2e || exit 1
              done
              echo "Burn-in complete - no flaky tests detected"
            displayName: "Run burn-in loop (10 iterations)" # Replace npm run test:e2e with TEST_CMD
          - publish: test-results/
            artifact: burn-in-failures
            condition: failed()
            displayName: "Upload burn-in failure artifacts"

@@ -0,0 +1,289 @@
# CI/CD Pipeline Setup - Validation Checklist
## Prerequisites
- [ ] Git repository initialized (`.git/` exists)
- [ ] Git remote configured (`git remote -v` shows origin)
- [ ] Test framework configured (appropriate config for detected stack type)
- [ ] Local tests pass (test command succeeds)
- [ ] Team agrees on CI platform
- [ ] Access to CI platform settings (if updating)
### Multi-Stack Detection
- [ ] Test stack type detected or configured (`frontend`, `backend`, `fullstack`)
- [ ] Test framework detected or configured (Playwright, Cypress, Jest, Vitest, etc.)
- [ ] Stack-appropriate test commands identified
### Multi-Platform Detection
- [ ] CI platform detected or configured
- [ ] Supported platform: GitHub Actions, GitLab CI, Jenkins, Azure DevOps, Harness, or CircleCI
- [ ] Platform-specific template selected
Note: CI setup is typically a one-time task per repo and can be run any time after the test framework is configured.
## Process Steps
### Step 1: Preflight Checks
- [ ] Git repository validated
- [ ] Framework configuration detected
- [ ] Local test execution successful
- [ ] CI platform detected or selected
- [ ] Node version identified (.nvmrc or default)
- [ ] No blocking issues found
### Step 2: CI Pipeline Configuration
- [ ] CI configuration file created at platform-correct path
  - GitHub Actions: `.github/workflows/test.yml`
  - GitLab CI: `.gitlab-ci.yml`
  - Jenkins: `Jenkinsfile`
  - Azure DevOps: `azure-pipelines.yml`
  - Harness: `.harness/pipeline.yaml`
  - CircleCI: `.circleci/config.yml`
- [ ] File is syntactically valid (no YAML/Groovy errors)
- [ ] Correct framework commands configured for detected stack type
- [ ] Node version matches project
- [ ] Test directory paths correct
- [ ] Stack-conditional steps applied:
  - [ ] Browser install included for frontend/fullstack stacks
  - [ ] Browser install omitted for backend-only stacks
- [ ] Test commands match detected framework
### Step 3: Parallel Sharding
- [ ] Matrix strategy configured (4 shards default)
- [ ] Shard syntax correct for framework
- [ ] fail-fast set to false
- [ ] Shard count appropriate for test suite size (see the local check below)
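
To verify the shard syntax before relying on the CI matrix, the flag can be exercised locally. A quick check, assuming the Playwright-style `--shard` flag the templates use (Vitest and Jest expose similar but differently named options):

```bash
# Run one quarter of the suite locally; this mirrors what a single CI shard executes.
npx playwright test --shard=1/4
# Shards are disjoint: 1/4 through 4/4 together cover the whole suite exactly once.
npx playwright test --shard=4/4
```
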
### Step 4: Burn-In Loop
- [ ] Burn-in job created (frontend/fullstack stacks) or intentionally skipped (backend-only)
- [ ] 10 iterations configured (when enabled)
- [ ] Proper exit on failure (`|| exit 1`)
- [ ] Runs on appropriate triggers (PR, cron)
- [ ] Failure artifacts uploaded
- [ ] Backend-only stacks: burn-in skipped by default (documented reason: targets UI flakiness)
### Step 5: Caching Configuration
- [ ] Dependency cache configured (npm/yarn)
- [ ] Cache key uses lockfile hash
- [ ] Browser cache configured (Playwright/Cypress)
- [ ] Restore-keys defined for fallback
- [ ] Cache paths correct for platform
### Step 6: Artifact Collection
- [ ] Artifacts upload on failure only
- [ ] Correct artifact paths (test-results/, traces/, etc.)
- [ ] Retention days set (30 default)
- [ ] Artifact names unique per shard
- [ ] No sensitive data in artifacts
### Step 7: Retry Logic
- [ ] Retry action/strategy configured
- [ ] Max attempts: 2-3
- [ ] Timeout appropriate (30 min)
- [ ] Retry only on transient errors (a wrapper sketch follows this list)
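
Where the platform lacks a built-in retry step (or for local use), a small wrapper can approximate one. A minimal sketch: the `scripts/retry.sh` name and `MAX_ATTEMPTS` variable are illustrative, not files this workflow generates, and a plain wrapper cannot tell transient failures from genuine ones, so keep attempts low:

```bash
#!/bin/bash
# Hypothetical scripts/retry.sh: re-run a command up to MAX_ATTEMPTS times.
# Usage: MAX_ATTEMPTS=3 ./scripts/retry.sh npm run test:e2e
set -uo pipefail

MAX_ATTEMPTS="${MAX_ATTEMPTS:-3}"
for attempt in $(seq 1 "$MAX_ATTEMPTS"); do
  echo "Attempt $attempt/$MAX_ATTEMPTS: $*"
  "$@" && exit 0                      # success: stop retrying
  echo "Attempt $attempt failed" >&2
done
echo "All $MAX_ATTEMPTS attempts failed" >&2
exit 1
```
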
### Step 8: Helper Scripts
- [ ] `scripts/test-changed.sh` created
- [ ] `scripts/ci-local.sh` created
- [ ] `scripts/burn-in.sh` created (optional; sketched after this list)
- [ ] Scripts are executable (`chmod +x`)
- [ ] Scripts use correct test commands
- [ ] Shebang present (`#!/bin/bash`)
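
For reference, the optional burn-in helper mirrors the loop the pipeline templates run in CI. A minimal sketch, assuming the templates' `npm run test:e2e` as TEST_CMD; the `ITERATIONS` override is illustrative:

```bash
#!/bin/bash
# scripts/burn-in.sh: run the E2E suite repeatedly to surface flaky tests.
# Usage: ./scripts/burn-in.sh               (10 iterations, as in CI)
#        ITERATIONS=25 ./scripts/burn-in.sh
set -uo pipefail

ITERATIONS="${ITERATIONS:-10}"
for i in $(seq 1 "$ITERATIONS"); do
  echo "Burn-in iteration $i/$ITERATIONS"
  npm run test:e2e || { echo "Failure on iteration $i (possible flaky test)" >&2; exit 1; }
done
echo "Burn-in complete - no flaky tests detected"
```
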
### Step 9: Documentation
- [ ] `docs/ci.md` created with pipeline guide
- [ ] `docs/ci-secrets-checklist.md` created
- [ ] Required secrets documented
- [ ] Setup instructions clear
- [ ] Troubleshooting section included
- [ ] Badge URLs provided (optional)
## Output Validation
### Configuration Validation
- [ ] CI file loads without errors
- [ ] All paths resolve correctly
- [ ] No hardcoded values (use env vars)
- [ ] Triggers configured (push, pull_request, schedule)
- [ ] Platform-specific syntax correct
### Execution Validation
- [ ] First CI run triggered (push to remote)
- [ ] Pipeline starts without errors
- [ ] All jobs appear in CI dashboard
- [ ] Caching works (check logs for cache hit)
- [ ] Tests execute in parallel
- [ ] Artifacts collected on failure
### Performance Validation
- [ ] Lint stage: <2 minutes
- [ ] Test stage (per shard): <10 minutes
- [ ] Burn-in stage: <30 minutes
- [ ] Total pipeline: <45 minutes
- [ ] Cache reduces install time by 2-5 minutes
## Quality Checks
### Best Practices Compliance
- [ ] Burn-in loop follows production patterns
- [ ] Parallel sharding configured optimally
- [ ] Failure-only artifact collection
- [ ] Selective testing enabled (optional)
- [ ] Retry logic handles transient failures only
- [ ] No secrets in configuration files
### Knowledge Base Alignment
- [ ] Burn-in pattern matches `ci-burn-in.md`
- [ ] Selective testing matches `selective-testing.md`
- [ ] Artifact collection matches `visual-debugging.md`
- [ ] Test quality matches `test-quality.md`
### Security Checks
- [ ] No credentials in CI configuration
- [ ] Secrets use platform secret management
- [ ] Environment variables for sensitive data
- [ ] Artifact retention appropriate (not too long)
- [ ] No debug output exposing secrets
- [ ] **MUST**: No `${{ inputs.* }}` or user-controlled GitHub context (`github.event.pull_request.title`, `github.event.issue.body`, `github.event.comment.body`, `github.head_ref`) directly in `run:` blocks — all passed through `env:` intermediaries and referenced as `"$ENV_VAR"`
## Integration Points
### Status File Integration
- [ ] CI setup logged in Quality & Testing Progress section
- [ ] Status updated with completion timestamp
- [ ] Platform and configuration noted
### Knowledge Base Integration
- [ ] Relevant knowledge fragments loaded
- [ ] Patterns applied from knowledge base
- [ ] Documentation references knowledge base
- [ ] Knowledge base references in README
### Workflow Dependencies
- [ ] `framework` workflow completed first
- [ ] Can proceed to `atdd` workflow after CI setup
- [ ] Can proceed to `automate` workflow
- [ ] CI integrates with `gate` workflow
## Completion Criteria
**All must be true:**
- [ ] All prerequisites met
- [ ] All process steps completed
- [ ] All output validations passed
- [ ] All quality checks passed
- [ ] All integration points verified
- [ ] First CI run successful
- [ ] Performance targets met
- [ ] Documentation complete
## Post-Workflow Actions
**User must complete:**
1. [ ] Commit CI configuration
2. [ ] Push to remote repository
3. [ ] Configure required secrets in CI platform
4. [ ] Open PR to trigger first CI run
5. [ ] Monitor and verify pipeline execution
6. [ ] Adjust parallelism if needed (based on actual run times)
7. [ ] Set up notifications (optional)
**Recommended next workflows:**
1. [ ] Run `atdd` workflow for test generation
2. [ ] Run `automate` workflow for coverage expansion
3. [ ] Run `gate` workflow for quality gates
## Rollback Procedure
If workflow fails:
1. [ ] Delete CI configuration file
2. [ ] Remove helper scripts directory
3. [ ] Remove documentation (docs/ci.md, etc.)
4. [ ] Clear CI platform secrets (if added)
5. [ ] Review error logs
6. [ ] Fix issues and retry workflow
## Notes
### Common Issues
**Issue**: CI file syntax errors
- **Solution**: Validate YAML syntax with a linter (example commands below)
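
For example (assuming the linters are installed; both are common standalone tools, not part of this workflow):

```bash
# Generic YAML syntax check, works for any platform's config file
yamllint azure-pipelines.yml .gitlab-ci.yml
# GitHub Actions-aware checks (expressions, action inputs, shell in run: blocks)
actionlint .github/workflows/test.yml
```
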
**Issue**: Tests fail in CI but pass locally
- **Solution**: Use `scripts/ci-local.sh` to mirror the CI environment (sketch below)
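
A minimal sketch of such a script, assuming the template commands (substitute your INSTALL_CMD / LINT_CMD / TEST_CMD):

```bash
#!/bin/bash
# scripts/ci-local.sh: approximate the CI stages in one local run.
set -euo pipefail

export CI=true     # many test frameworks switch to CI behavior on this flag
npm ci             # INSTALL_CMD: clean install from the lockfile, as CI does
npm run lint       # LINT_CMD
npm run test:e2e   # TEST_CMD (append -- --shard=1/4 to exercise a single shard)
```
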
**Issue**: Caching not working
- **Solution**: Check cache key formula, verify paths
**Issue**: Burn-in too slow
- **Solution**: Reduce iterations or run on cron only
### Platform-Specific
**GitHub Actions:**
- Secrets: Repository Settings → Secrets and variables → Actions
- Runners: Ubuntu latest recommended
- Concurrency limits: 20 jobs for free tier
**GitLab CI:**
- Variables: Project Settings → CI/CD → Variables
- Runners: Shared or project-specific
- Pipeline quota: 400 minutes/month free tier
**Jenkins:**
- Credentials: Manage Jenkins → Manage Credentials
- Agents: Configure build agents with Node.js
- Plugins: Pipeline, JUnit, HTML Publisher recommended
**Azure DevOps:**
- Variables: Pipelines → Library → Variable groups
- Agent pools: Azure-hosted or self-hosted
- Parallel jobs: 1 free (Microsoft-hosted)
**Harness:**
- Connectors: Configure container registry and code repo connectors
- Delegates: Install Harness delegate in target infrastructure
- Steps: Use Run steps with appropriate container images
---
**Checklist Complete**: Sign off when all items validated.
**Completed by:** {name}
**Date:** {date}
**Platform:** {GitHub Actions, GitLab CI, Other}
**Notes:** {notes}

@@ -0,0 +1,328 @@
# GitHub Actions CI/CD Pipeline for Test Execution
# Generated by BMad TEA Agent - Test Architect Module
# Optimized for: Parallel Sharding, Burn-In Loop
# Stack: {test_stack_type} | Framework: {test_framework}
#
# Variables to customize per project:
# INSTALL_CMD - dependency install command (e.g., npm ci, pnpm install --frozen-lockfile, yarn --frozen-lockfile)
# TEST_CMD - main test command (e.g., npm run test:e2e, npm test, npx vitest)
# LINT_CMD - lint command (e.g., npm run lint)
# BROWSER_INSTALL - browser install command (frontend/fullstack only; omit for backend)
# BROWSER_CACHE_PATH - browser cache path (frontend/fullstack only; omit for backend)
name: Test Pipeline

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
  schedule:
    # Weekly burn-in on Sundays at 2 AM UTC
    - cron: "0 2 * * 0"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  # Lint stage - Code quality checks
  lint:
    name: Lint
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - name: Determine Node version
        id: node-version
        run: |
          if [ -f .nvmrc ]; then
            echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
            echo "Using Node from .nvmrc"
          else
            echo "value=24" >> "$GITHUB_OUTPUT"
            echo "Using default Node 24 (current LTS)"
          fi
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ steps.node-version.outputs.value }}
          cache: "npm"
      - name: Install dependencies
        run: npm ci # Replace with INSTALL_CMD
      - name: Run linter
        run: npm run lint # Replace with LINT_CMD

  # Test stage - Parallel execution with sharding
  test:
    name: Test (Shard ${{ matrix.shard }})
    runs-on: ubuntu-latest
    timeout-minutes: 30
    needs: lint
    strategy:
      fail-fast: false
      matrix:
        shard: [1, 2, 3, 4]
    steps:
      - uses: actions/checkout@v4
      - name: Determine Node version
        id: node-version
        run: |
          if [ -f .nvmrc ]; then
            echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
            echo "Using Node from .nvmrc"
          else
            echo "value=24" >> "$GITHUB_OUTPUT"
            echo "Using default Node 24 (current LTS)"
          fi
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ steps.node-version.outputs.value }}
          cache: "npm"
      - name: Cache Playwright browsers
        uses: actions/cache@v4
        with:
          path: ~/.cache/ms-playwright
          key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-playwright-
      - name: Install dependencies
        run: npm ci # Replace with INSTALL_CMD
      # Frontend/Fullstack only — remove this step for backend-only stacks
      - name: Install Playwright browsers
        run: npx playwright install --with-deps chromium # Replace with BROWSER_INSTALL
      - name: Run tests (shard ${{ matrix.shard }}/4)
        run: npm run test:e2e -- --shard=${{ matrix.shard }}/4 # Replace with TEST_CMD + shard args
      - name: Upload test results
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-${{ matrix.shard }}
          path: |
            test-results/
            playwright-report/
          retention-days: 30

  # Burn-in stage - Flaky test detection
  burn-in:
    name: Burn-In (Flaky Detection)
    runs-on: ubuntu-latest
    timeout-minutes: 60
    needs: test
    # Only run burn-in on PRs to main/develop or on schedule
    if: github.event_name == 'pull_request' || github.event_name == 'schedule'
    steps:
      - uses: actions/checkout@v4
      - name: Determine Node version
        id: node-version
        run: |
          if [ -f .nvmrc ]; then
            echo "value=$(cat .nvmrc)" >> "$GITHUB_OUTPUT"
            echo "Using Node from .nvmrc"
          else
            echo "value=24" >> "$GITHUB_OUTPUT"
            echo "Using default Node 24 (current LTS)"
          fi
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ steps.node-version.outputs.value }}
          cache: "npm"
      # Frontend/Fullstack only — remove this step for backend-only stacks
      - name: Cache Playwright browsers
        uses: actions/cache@v4
        with:
          path: ~/.cache/ms-playwright # Replace with BROWSER_CACHE_PATH
          key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }}
      - name: Install dependencies
        run: npm ci # Replace with INSTALL_CMD
      # Frontend/Fullstack only — remove this step for backend-only stacks
      - name: Install Playwright browsers
        run: npx playwright install --with-deps chromium # Replace with BROWSER_INSTALL
      # Note: Burn-in targets UI flakiness. For backend-only stacks, remove this job entirely.
      - name: Run burn-in loop (10 iterations)
        run: |
          echo "🔥 Starting burn-in loop - detecting flaky tests"
          for i in {1..10}; do
            echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            echo "🔥 Burn-in iteration $i/10"
            echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
            npm run test:e2e || exit 1 # Replace with TEST_CMD
          done
          echo "✅ Burn-in complete - no flaky tests detected"
      - name: Upload burn-in failure artifacts
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: burn-in-failures
          path: |
            test-results/
            playwright-report/
          retention-days: 30

  # Report stage - Aggregate and publish results
  report:
    name: Test Report
    runs-on: ubuntu-latest
    needs: [test, burn-in]
    if: always()
    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts
      - name: Generate summary
        run: |
          echo "## Test Execution Summary" >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          echo "- **Status**: ${{ needs.test.result }}" >> "$GITHUB_STEP_SUMMARY"
          echo "- **Burn-in**: ${{ needs.burn-in.result }}" >> "$GITHUB_STEP_SUMMARY"
          echo "- **Shards**: 4" >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          if [ "${{ needs.burn-in.result }}" == "failure" ]; then
            echo "⚠️ **Flaky tests detected** - Review burn-in artifacts" >> "$GITHUB_STEP_SUMMARY"
          fi
# ============================================================================
# EXTENSION PATTERNS — Script Injection Prevention
# ============================================================================
# When extending this template into reusable workflows, manual dispatch
# workflows, or composite actions, NEVER use ${{ inputs.* }} directly in
# run: blocks. Always pass through env: intermediaries.
#
# KEY PRINCIPLE: Inputs must be DATA, not COMMANDS.
# Pass inputs through env: and interpolate as quoted arguments into fixed
# commands. NEVER accept command-shaped inputs (e.g., install-command,
# test-command) that get executed as shell code — even through env:.
#
# --- Reusable Workflow (workflow_call) ---
#
# on:
#   workflow_call:
#     inputs:
#       test-grep:
#         description: 'Test grep filter (data only — not a command)'
#         type: string
#         required: false
#         default: ''
#       base-ref:
#         description: 'Base branch for diff'
#         type: string
#         required: false
#         default: 'main'
#       burn-in-count:
#         description: 'Number of burn-in iterations'
#         type: string
#         required: false
#         default: '10'
#
# jobs:
#   test:
#     runs-on: ubuntu-latest
#     steps:
#       - uses: actions/checkout@v4
#       # Fixed command — not derived from inputs
#       - name: Install dependencies
#         run: npm ci
#       # ✅ SAFE — input is DATA passed as an argument to a fixed command
#       - name: Run tests
#         env:
#           TEST_GREP: ${{ inputs.test-grep }}
#         run: |
#           # Security: inputs passed through env: to prevent script injection
#           if [ -n "$TEST_GREP" ]; then
#             npx playwright test --grep "$TEST_GREP"
#           else
#             npx playwright test
#           fi
#
# --- Manual Dispatch (workflow_dispatch) ---
#
# on:
#   workflow_dispatch:
#     inputs:
#       test-grep:
#         description: 'Test grep filter (data only — not a command)'
#         type: string
#         required: false
#       environment:
#         description: 'Target environment'
#         type: choice
#         options: [staging, production]
#
# jobs:
#   run-tests:
#     runs-on: ubuntu-latest
#     steps:
#       - uses: actions/checkout@v4
#       # ✅ SAFE — input is DATA interpolated into a fixed command
#       - name: Run selected tests
#         env:
#           TEST_GREP: ${{ inputs.test-grep }}
#         run: |
#           # Security: inputs passed through env: to prevent script injection
#           npx playwright test --grep "$TEST_GREP"
#
# --- Composite Action (action.yml) ---
#
# inputs:
#   test-grep:
#     description: 'Test grep filter (data only — not a command)'
#     required: false
#     default: ''
#   burn-in-count:
#     description: 'Number of burn-in iterations'
#     required: false
#     default: '10'
#
# runs:
#   using: composite
#   steps:
#     # ✅ SAFE — inputs are DATA arguments to fixed commands
#     - name: Run burn-in
#       shell: bash
#       env:
#         TEST_GREP: ${{ inputs.test-grep }}
#         BURN_IN_COUNT: ${{ inputs.burn-in-count }}
#       run: |
#         # Security: inputs passed through env: to prevent script injection
#         for i in $(seq 1 "$BURN_IN_COUNT"); do
#           echo "Burn-in iteration $i/$BURN_IN_COUNT"
#           npx playwright test --grep "$TEST_GREP" || exit 1
#         done
#
# ❌ NEVER DO THIS:
#   # Direct ${{ inputs.* }} in run: — GitHub expression injection
#   - run: npx playwright test --grep "${{ inputs.test-grep }}"
#
#   # Executing input-derived env var as a command — still command injection
#   - env:
#       CMD: ${{ inputs.test-command }}
#     run: $CMD
# ============================================================================

@@ -0,0 +1,158 @@
# GitLab CI/CD Pipeline for Test Execution
# Generated by BMad TEA Agent - Test Architect Module
# Optimized for: Parallel Sharding, Burn-In Loop
# Stack: {test_stack_type} | Framework: {test_framework}
#
# Variables to customize per project:
# INSTALL_CMD - dependency install command (e.g., npm ci, pnpm install --frozen-lockfile)
# TEST_CMD - main test command (e.g., npm run test:e2e, npm test, npx vitest)
# LINT_CMD - lint command (e.g., npm run lint)
# BROWSER_INSTALL - browser install command (frontend/fullstack only; omit for backend)
# BROWSER_CACHE_PATH - browser cache path (frontend/fullstack only; omit for backend)
stages:
  - lint
  - test
  - burn-in
  - report

# Shared rules: run jobs in merge request, scheduled, and main/develop branch
# pipelines. Without these, jobs with no rules are excluded from merge request
# pipelines and the burn-in job's `needs` cannot be satisfied.
.pipeline-rules: &pipeline-rules
  - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
  - if: '$CI_PIPELINE_SOURCE == "schedule"'
  - if: '$CI_COMMIT_BRANCH == "main" || $CI_COMMIT_BRANCH == "develop"'

variables:
  # Full clone (no shallow fetch) for accurate change detection
  GIT_DEPTH: 0
  # Keep the npm cache inside the project dir so it can be cached between jobs
  npm_config_cache: "$CI_PROJECT_DIR/.npm"
  # Playwright browser cache
  PLAYWRIGHT_BROWSERS_PATH: "$CI_PROJECT_DIR/.cache/ms-playwright"
  # Default Node version when .nvmrc is missing
  DEFAULT_NODE_VERSION: "24"

# Caching configuration
cache:
  key:
    files:
      - package-lock.json
  paths:
    - .npm/
    - .cache/ms-playwright/
    - node_modules/

# Lint stage - Code quality checks
lint:
  stage: lint
  image: node:$DEFAULT_NODE_VERSION
  rules: *pipeline-rules
  before_script:
    - |
      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
      echo "Using Node $NODE_VERSION"
      npm install -g n
      n "$NODE_VERSION"
      node -v
    - npm ci # Replace with INSTALL_CMD
  script:
    - npm run lint # Replace with LINT_CMD
  timeout: 5 minutes

# Test stage - Parallel execution with sharding
.test-template: &test-template
  stage: test
  image: node:$DEFAULT_NODE_VERSION
  rules: *pipeline-rules
  needs:
    - lint
  before_script:
    - |
      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
      echo "Using Node $NODE_VERSION"
      npm install -g n
      n "$NODE_VERSION"
      node -v
    - npm ci # Replace with INSTALL_CMD
    - npx playwright install --with-deps chromium # Replace with BROWSER_INSTALL; remove for backend-only
  artifacts:
    when: on_failure
    paths:
      - test-results/
      - playwright-report/
    expire_in: 30 days
  timeout: 30 minutes

test:shard-1:
  <<: *test-template
  script:
    - npm run test:e2e -- --shard=1/4 # Replace with TEST_CMD + shard args

test:shard-2:
  <<: *test-template
  script:
    - npm run test:e2e -- --shard=2/4 # Replace with TEST_CMD + shard args

test:shard-3:
  <<: *test-template
  script:
    - npm run test:e2e -- --shard=3/4 # Replace with TEST_CMD + shard args

test:shard-4:
  <<: *test-template
  script:
    - npm run test:e2e -- --shard=4/4 # Replace with TEST_CMD + shard args

# Burn-in stage - Flaky test detection
# Note: Burn-in targets UI flakiness. For backend-only stacks, remove this job entirely.
burn-in:
  stage: burn-in
  image: node:$DEFAULT_NODE_VERSION
  needs:
    - test:shard-1
    - test:shard-2
    - test:shard-3
    - test:shard-4
  # Only run burn-in in merge request or scheduled pipelines
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
  before_script:
    - |
      NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "$DEFAULT_NODE_VERSION")
      echo "Using Node $NODE_VERSION"
      npm install -g n
      n "$NODE_VERSION"
      node -v
    - npm ci # Replace with INSTALL_CMD
    - npx playwright install --with-deps chromium # Replace with BROWSER_INSTALL; remove for backend-only
  script:
    - |
      echo "🔥 Starting burn-in loop - detecting flaky tests"
      for i in {1..10}; do
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        echo "🔥 Burn-in iteration $i/10"
        echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
        npm run test:e2e || exit 1 # Replace with TEST_CMD
      done
      echo "✅ Burn-in complete - no flaky tests detected"
  artifacts:
    when: on_failure
    paths:
      - test-results/
      - playwright-report/
    expire_in: 30 days
  timeout: 60 minutes

# Report stage - Aggregate results
report:
  stage: report
  image: alpine:latest
  needs:
    - test:shard-1
    - test:shard-2
    - test:shard-3
    - test:shard-4
    # burn-in only exists in merge request and scheduled pipelines
    - job: burn-in
      optional: true
  # Run (even on failure) in the same pipelines as the test jobs
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      when: always
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      when: always
    - if: '$CI_COMMIT_BRANCH == "main" || $CI_COMMIT_BRANCH == "develop"'
      when: always
  script:
    - |
      echo "## Test Execution Summary"
      echo ""
      echo "- Pipeline: $CI_PIPELINE_ID"
      echo "- Shards: 4"
      echo "- Branch: $CI_COMMIT_REF_NAME"
      echo ""
      echo "View detailed results in job artifacts"

@@ -0,0 +1,159 @@
# Harness CI Pipeline for Test Execution
# Generated by BMad TEA Agent - Test Architect Module
# Optimized for: Parallel Sharding, Burn-In Loop
# Stack: {test_stack_type} | Framework: {test_framework}
#
# Variables to customize per project:
# INSTALL_CMD - dependency install command (e.g., npm ci, pnpm install --frozen-lockfile)
# TEST_CMD - main test command (e.g., npm run test:e2e, npm test, npx vitest)
# LINT_CMD - lint command (e.g., npm run lint)
# BROWSER_INSTALL - browser install command (frontend/fullstack only; omit for backend)
pipeline:
  name: Test Pipeline
  identifier: test_pipeline
  projectIdentifier: default
  orgIdentifier: default
  stages:
    # Lint stage - Code quality checks
    - stage:
        name: Lint
        identifier: lint
        type: CI
        spec:
          cloneCodebase: true
          infrastructure:
            type: KubernetesDirect
            spec:
              connectorRef: account.harnessImage
              namespace: default
          execution:
            steps:
              - step:
                  type: Run
                  name: Install dependencies
                  identifier: install
                  spec:
                    connectorRef: account.harnessImage
                    image: node:24
                    shell: Sh
                    command: npm ci # Replace with INSTALL_CMD
              - step:
                  type: Run
                  name: Run linter
                  identifier: lint
                  spec:
                    connectorRef: account.harnessImage
                    image: node:24
                    shell: Sh
                    command: npm run lint # Replace with LINT_CMD
    # Test stage - Parallel execution with sharding
    - stage:
        name: Test
        identifier: test
        type: CI
        spec:
          cloneCodebase: true
          infrastructure:
            type: KubernetesDirect
            spec:
              connectorRef: account.harnessImage
              namespace: default
          execution:
            steps:
              - step:
                  type: Run
                  name: Install dependencies
                  identifier: install
                  spec:
                    connectorRef: account.harnessImage
                    image: node:24
                    shell: Sh
                    command: npm ci # Replace with INSTALL_CMD
              # Frontend/Fullstack only — remove this step for backend-only stacks
              - step:
                  type: Run
                  name: Install browsers
                  identifier: browsers
                  spec:
                    connectorRef: account.harnessImage
                    image: mcr.microsoft.com/playwright:v1.50.0-noble
                    shell: Sh
                    command: npx playwright install --with-deps chromium # Replace with BROWSER_INSTALL
              - parallel:
                  - step:
                      type: Run
                      name: Test Shard 1
                      identifier: shard_1
                      spec:
                        connectorRef: account.harnessImage
                        image: mcr.microsoft.com/playwright:v1.50.0-noble
                        shell: Sh
                        command: npm run test:e2e -- --shard=1/4 # Replace with TEST_CMD + shard args
                  - step:
                      type: Run
                      name: Test Shard 2
                      identifier: shard_2
                      spec:
                        connectorRef: account.harnessImage
                        image: mcr.microsoft.com/playwright:v1.50.0-noble
                        shell: Sh
                        command: npm run test:e2e -- --shard=2/4 # Replace with TEST_CMD + shard args
                  - step:
                      type: Run
                      name: Test Shard 3
                      identifier: shard_3
                      spec:
                        connectorRef: account.harnessImage
                        image: mcr.microsoft.com/playwright:v1.50.0-noble
                        shell: Sh
                        command: npm run test:e2e -- --shard=3/4 # Replace with TEST_CMD + shard args
                  - step:
                      type: Run
                      name: Test Shard 4
                      identifier: shard_4
                      spec:
                        connectorRef: account.harnessImage
                        image: mcr.microsoft.com/playwright:v1.50.0-noble
                        shell: Sh
                        command: npm run test:e2e -- --shard=4/4 # Replace with TEST_CMD + shard args
    # Burn-in stage - Flaky test detection
    # Note: Burn-in targets UI flakiness. For backend-only stacks, remove this stage entirely.
    - stage:
        name: Burn-In
        identifier: burn_in
        type: CI
        when:
          pipelineStatus: Success
          condition: <+pipeline.triggerType> == "WEBHOOK" || <+pipeline.triggerType> == "SCHEDULER"
        spec:
          cloneCodebase: true
          infrastructure:
            type: KubernetesDirect
            spec:
              connectorRef: account.harnessImage
              namespace: default
          execution:
            steps:
              - step:
                  type: Run
                  name: Install and burn-in
                  identifier: burn_in_loop
                  spec:
                    connectorRef: account.harnessImage
                    image: mcr.microsoft.com/playwright:v1.50.0-noble
                    shell: Sh
                    # Replace npm ci with INSTALL_CMD and npm run test:e2e with TEST_CMD
                    command: |
                      npm ci
                      npx playwright install --with-deps chromium
                      echo "Starting burn-in loop - detecting flaky tests"
                      for i in $(seq 1 10); do
                        echo "Burn-in iteration $i/10"
                        npm run test:e2e || exit 1
                      done
                      echo "Burn-in complete - no flaky tests detected"

@@ -0,0 +1,45 @@
<!-- Powered by BMAD-CORE™ -->
# CI/CD Pipeline Setup
**Workflow ID**: `_bmad/tea/testarch/ci`
**Version**: 5.0 (Step-File Architecture)
---
## Overview
Scaffold a production-ready CI/CD quality pipeline with test execution, burn-in loops for flaky detection, parallel sharding, artifact collection, and notifications.
---
## WORKFLOW ARCHITECTURE
This workflow uses **step-file architecture**:
- **Micro-file Design**: Each step is self-contained
- **JIT Loading**: Only the current step file is in memory
- **Sequential Enforcement**: Execute steps in order
---
## INITIALIZATION SEQUENCE
### 1. Configuration Loading
From `workflow.yaml`, resolve:
- `config_source`, `test_artifacts`, `user_name`, `communication_language`, `document_output_language`, `date`
- `ci_platform`, `test_dir`
### 2. First Step
Load, read completely, and execute:
`{project-root}/_bmad/tea/workflows/testarch/ci/steps-c/step-01-preflight.md`
### 3. Resume Support
If the user selects **Resume** mode, load, read completely, and execute:
`{project-root}/_bmad/tea/workflows/testarch/ci/steps-c/step-01b-resume.md`
This checks the output document for progress tracking frontmatter and routes to the next incomplete step.

View File

@@ -0,0 +1,129 @@
// Jenkinsfile CI/CD Pipeline for Test Execution
// Generated by BMad TEA Agent - Test Architect Module
// Optimized for: Parallel Sharding, Burn-In Loop
// Stack: {test_stack_type} | Framework: {test_framework}
//
// Variables to customize per project:
// INSTALL_CMD - dependency install command (e.g., npm ci, pnpm install --frozen-lockfile)
// TEST_CMD - main test command (e.g., npm run test:e2e, npm test, npx vitest)
// LINT_CMD - lint command (e.g., npm run lint)
// BROWSER_INSTALL - browser install command (frontend/fullstack only; omit for backend)
//
// Node.js version management — choose one:
// Option A (recommended): Configure NodeJS Plugin in Jenkins Global Tool Configuration,
// then add to pipeline: tools { nodejs 'NodeJS-24' }
// Option B: Use nvm (pre-installed on agent) — this template uses nvm as the default
// Option C: Use a Docker agent — agent { docker { image 'node:24' } }
pipeline {
agent any
environment {
CI = 'true'
}
options {
timeout(time: 45, unit: 'MINUTES')
disableConcurrentBuilds()
}
stages {
stage('Checkout') {
steps {
checkout scm
}
}
stage('Install') {
steps {
// Detect and apply Node.js version from .nvmrc (falls back to v24)
// If using NodeJS Plugin instead, remove this block and add: tools { nodejs 'NodeJS-24' }
sh '''
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"
NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "24")
nvm install "$NODE_VERSION" 2>/dev/null || true
nvm use "$NODE_VERSION" 2>/dev/null || true
node --version
npm ci
''' // Replace npm ci with INSTALL_CMD
// Stash installed dependencies so parallel shards can restore them
stash includes: 'node_modules/**', name: 'deps'
}
}
stage('Lint') {
steps {
                // Note: nvm selection from the Install stage does not persist across sh steps; prefer the NodeJS Plugin or a Docker agent for a consistent Node version
                sh 'npm run lint' // Replace with LINT_CMD
}
}
// Test stage - Parallel execution with sharding
// Each shard restores dependencies via unstash for workspace safety
stage('Test') {
parallel {
stage('Shard 1') {
steps {
unstash 'deps'
// Frontend/Fullstack only — remove browser install for backend-only stacks
sh 'npx playwright install --with-deps chromium' // Replace with BROWSER_INSTALL
sh 'npm run test:e2e -- --shard=1/4' // Replace with TEST_CMD + shard args
}
}
stage('Shard 2') {
steps {
unstash 'deps'
sh 'npx playwright install --with-deps chromium' // Replace with BROWSER_INSTALL
sh 'npm run test:e2e -- --shard=2/4' // Replace with TEST_CMD + shard args
}
}
stage('Shard 3') {
steps {
unstash 'deps'
sh 'npx playwright install --with-deps chromium' // Replace with BROWSER_INSTALL
sh 'npm run test:e2e -- --shard=3/4' // Replace with TEST_CMD + shard args
}
}
stage('Shard 4') {
steps {
unstash 'deps'
sh 'npx playwright install --with-deps chromium' // Replace with BROWSER_INSTALL
sh 'npm run test:e2e -- --shard=4/4' // Replace with TEST_CMD + shard args
}
}
}
}
// Burn-in stage - Flaky test detection
// Note: Burn-in targets UI flakiness. For backend-only stacks, remove this stage entirely.
stage('Burn-In') {
when {
anyOf {
changeRequest()
triggeredBy 'TimerTrigger'
}
}
steps {
sh '''
echo "Starting burn-in loop - detecting flaky tests"
for i in $(seq 1 10); do
echo "Burn-in iteration $i/10"
npm run test:e2e || exit 1
done
echo "Burn-in complete - no flaky tests detected"
''' // Replace npm run test:e2e with TEST_CMD
}
}
}
post {
always {
// Archive test results and reports
archiveArtifacts artifacts: 'test-results/**,playwright-report/**', allowEmptyArchive: true
junit testResults: 'test-results/**/*.xml', allowEmptyResults: true
}
failure {
echo 'Pipeline failed - check test results and artifacts'
}
}
}

View File

@@ -0,0 +1,158 @@
---
name: 'step-01-preflight'
description: 'Verify prerequisites and detect CI platform'
nextStepFile: './step-02-generate-pipeline.md'
outputFile: '{test_artifacts}/ci-pipeline-progress.md'
---
# Step 1: Preflight Checks
## STEP GOAL
Verify CI prerequisites and determine target CI platform.
## MANDATORY EXECUTION RULES
- 📖 Read the entire step file before acting
- ✅ Speak in `{communication_language}`
- 🚫 Halt if requirements fail
---
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 💾 Record outputs before proceeding
- 📖 Load the next step only when instructed
## CONTEXT BOUNDARIES:
- Available context: config, loaded artifacts, and knowledge fragments
- Focus: this step's goal only
- Limits: do not execute future steps
- Dependencies: prior steps' outputs (if any)
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise.
## 1. Verify Git Repository
- `.git/` exists
- Remote configured (if available)
If missing: **HALT** with "Git repository required for CI/CD setup."
---
## 2. Detect Test Stack Type
Determine the project's test stack type (`test_stack_type`) using the following algorithm:
1. If `test_stack_type` is explicitly set in config (not `"auto"`), use that value.
2. Otherwise, auto-detect by scanning project manifests:
- **Frontend indicators**: `playwright.config.*`, `cypress.config.*`, `vite.config.*`, `next.config.*`, `src/components/`, `src/pages/`, `src/app/`
- **Backend indicators**: `pyproject.toml`, `pom.xml`/`build.gradle`, `go.mod`, `*.csproj`/`*.sln`, `Gemfile`, `Cargo.toml`, `jest.config.*`, `vitest.config.*`, `src/routes/`, `src/controllers/`, `src/api/`, `Dockerfile`, `serverless.yml`
   - **Both present** → `fullstack`
   - **Only frontend** → `frontend`
   - **Only backend** → `backend`
- **Cannot determine** → default to `fullstack` and note assumption
Record detected `test_stack_type` in step output.
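For reference, the decision can be sketched as a shell check run from the project root. A minimal sketch covering the indicators above (not exhaustive; extend the patterns with `*.csproj`, `*.sln`, and any other manifests in use):
```bash
#!/bin/sh
# Minimal sketch: auto-detect test_stack_type from the indicator lists above.
exists() { ls $1 >/dev/null 2>&1; }  # glob-aware existence check

has_frontend=false
if exists 'playwright.config.*' || exists 'cypress.config.*' || \
   exists 'vite.config.*' || exists 'next.config.*' || \
   [ -d src/components ] || [ -d src/pages ] || [ -d src/app ]; then
  has_frontend=true
fi

has_backend=false
if [ -f pyproject.toml ] || [ -f pom.xml ] || [ -f build.gradle ] || \
   [ -f go.mod ] || [ -f Gemfile ] || [ -f Cargo.toml ] || \
   exists 'jest.config.*' || exists 'vitest.config.*' || \
   [ -d src/routes ] || [ -d src/controllers ] || [ -d src/api ] || \
   [ -f Dockerfile ] || [ -f serverless.yml ]; then
  has_backend=true
fi

if $has_frontend && $has_backend; then echo fullstack
elif $has_frontend; then echo frontend
elif $has_backend; then echo backend
else echo fullstack  # cannot determine: default and note the assumption
fi
```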
---
## 3. Verify Test Framework
- Check for framework configuration based on detected stack:
- **Frontend/Fullstack**: `playwright.config.*` or `cypress.config.*` exists
- **Backend (Node.js)**: `jest.config.*` or `vitest.config.*` or test scripts in `package.json`
  - **Backend (Python)**: `pyproject.toml` with `[tool.pytest.ini_options]`, or `pytest.ini`, or `setup.cfg` with pytest config
- **Backend (Java/Kotlin)**: `pom.xml` with surefire/failsafe plugins or `build.gradle` with test task
- **Backend (Go)**: `*_test.go` files present (Go convention — no config file needed)
- **Backend (C#/.NET)**: `*.csproj` with xUnit/NUnit/MSTest references
- **Backend (Ruby)**: `Gemfile` with rspec or `.rspec` config file
- If `test_framework` is `"auto"`, detect from config files and project manifests found
- Verify test dependencies are installed (language-appropriate package manager)
If missing: **HALT** with "Run `framework` workflow first."
---
## 4. Ensure Tests Pass Locally
- Run the main test command based on detected stack and framework:
- **Node.js**: `npm test` or `npm run test:e2e`
- **Python**: `pytest` or `python -m pytest`
- **Java**: `mvn test` or `gradle test`
- **Go**: `go test ./...`
- **C#/.NET**: `dotnet test`
- **Ruby**: `bundle exec rspec`
- If failing: **HALT** and request fixes before CI setup
---
## 5. Detect CI Platform
- If `ci_platform` is explicitly set in config (not `"auto"`), use that value.
- Otherwise, scan for existing CI configuration files:
  - `.github/workflows/*.yml` → `github-actions`
  - `.gitlab-ci.yml` → `gitlab-ci`
  - `Jenkinsfile` → `jenkins`
  - `azure-pipelines.yml` → `azure-devops`
  - `.harness/*.yaml` → `harness`
  - `.circleci/config.yml` → `circle-ci`
- If found, ask whether to update or replace
- If not found, infer from git remote (github.com → `github-actions`, gitlab.com → `gitlab-ci`)
- If still unresolved, default to `github-actions`
Record detected `ci_platform` in step output.
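The same precedence is easy to express directly; a minimal shell sketch, assuming `git` is available (the interactive update-or-replace prompt is omitted):
```bash
#!/bin/sh
# Sketch: detect ci_platform from config files, then the git remote, then the default.
if ls .github/workflows/*.yml >/dev/null 2>&1; then echo github-actions
elif [ -f .gitlab-ci.yml ];      then echo gitlab-ci
elif [ -f Jenkinsfile ];         then echo jenkins
elif [ -f azure-pipelines.yml ]; then echo azure-devops
elif ls .harness/*.yaml >/dev/null 2>&1; then echo harness
elif [ -f .circleci/config.yml ]; then echo circle-ci
else
  remote=$(git remote get-url origin 2>/dev/null || true)
  case "$remote" in
    *github.com*) echo github-actions ;;
    *gitlab.com*) echo gitlab-ci ;;
    *)            echo github-actions ;;  # still unresolved: default
  esac
fi
```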
---
## 6. Read Environment Context
- Read environment context based on detected stack:
- **Node.js**: Read `.nvmrc` if present (default to Node 24+ LTS if missing); read `package.json` for dependency caching strategy
- **Python**: Read `.python-version` or `pyproject.toml` for Python version; note `pip`/`poetry`/`pipenv` for caching
- **Java**: Read `pom.xml`/`build.gradle` for Java version; note Maven/Gradle for caching
- **Go**: Read `go.mod` for Go version; note Go module cache path
- **C#/.NET**: Read `*.csproj`/`global.json` for .NET SDK version; note NuGet cache
- **Ruby**: Read `.ruby-version` or `Gemfile` for Ruby version; note Bundler cache
---
### 7. Save Progress
**Save this step's accumulated work to `{outputFile}`.**
- **If `{outputFile}` does not exist** (first save), create it with YAML frontmatter:
```yaml
---
stepsCompleted: ['step-01-preflight']
lastStep: 'step-01-preflight'
lastSaved: '{date}'
---
```
Then write this step's output below the frontmatter.
- **If `{outputFile}` already exists**, update:
- Add `'step-01-preflight'` to `stepsCompleted` array (only if not already present)
- Set `lastStep: 'step-01-preflight'`
- Set `lastSaved: '{date}'`
- Append this step's output to the appropriate section of the document.
Load next step: `{nextStepFile}`
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Step completed in full with required outputs
### ❌ SYSTEM FAILURE:
- Skipped sequence steps or missing outputs
**Master Rule:** Skipping steps is FORBIDDEN.

View File

@@ -0,0 +1,110 @@
---
name: 'step-01b-resume'
description: 'Resume interrupted workflow from last completed step'
outputFile: '{test_artifacts}/ci-pipeline-progress.md'
---
# Step 1b: Resume Workflow
## STEP GOAL
Resume an interrupted workflow by loading the existing progress document, displaying progress, verifying previously created artifacts, and routing to the next incomplete step.
## MANDATORY EXECUTION RULES
- 📖 Read the entire step file before acting
- ✅ Speak in `{communication_language}`
---
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 📖 Load the next step only when instructed
## CONTEXT BOUNDARIES:
- Available context: Output document with progress frontmatter
- Focus: Load progress and route to next step
- Limits: Do not re-execute completed steps
- Dependencies: Output document must exist from a previous run
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise.
### 1. Load Output Document
Read `{outputFile}` and parse YAML frontmatter for:
- `stepsCompleted` — array of completed step names
- `lastStep` — last completed step name
- `lastSaved` — timestamp of last save
**If `{outputFile}` does not exist**, display:
"⚠️ **No previous progress found.** There is no output document to resume from. Please use **[C] Create** to start a fresh workflow run."
**THEN:** Halt. Do not proceed.
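A minimal shell sketch of the frontmatter parse, assuming the frontmatter is the first block delimited by two `---` lines:
```bash
#!/bin/sh
# Sketch: extract resume fields from the progress document's YAML frontmatter.
progress_file="${1:?usage: resume.sh <progress-file>}"
[ -f "$progress_file" ] || { echo "No previous progress found; start a fresh run." >&2; exit 1; }
frontmatter=$(awk '/^---$/{n++; next} n==1{print} n>1{exit}' "$progress_file")
last_step=$(printf '%s\n' "$frontmatter" | sed -n 's/^lastStep: *//p' | tr -d "'")
last_saved=$(printf '%s\n' "$frontmatter" | sed -n 's/^lastSaved: *//p' | tr -d "'")
echo "Last step: $last_step (saved $last_saved)"
```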
---
### 2. Verify Previously Created Artifacts
Since this is a file-creation workflow, verify that artifacts from completed steps still exist on disk:
- If `step-02-generate-pipeline` is in `stepsCompleted`, check that the pipeline config file exists (e.g., `.github/workflows/test.yml` or equivalent)
- If any expected artifact is missing, warn the user and suggest re-running from the step that creates it
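A hedged sketch of the artifact check, assuming the GitHub Actions output path; substitute the platform-specific file resolved in step 2:
```bash
#!/bin/sh
# Sketch: warn when a completed step's artifact has gone missing.
if grep -q 'step-02-generate-pipeline' "$progress_file"; then
  pipeline_file=".github/workflows/test.yml"  # use the platform-specific equivalent for other CI systems
  [ -f "$pipeline_file" ] || echo "Warning: $pipeline_file is missing; consider re-running step 2." >&2
fi
```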
---
### 3. Display Progress Dashboard
Display:
"📋 **Workflow Resume — CI/CD Pipeline Setup**
**Last saved:** {lastSaved}
**Steps completed:** {stepsCompleted.length} of 4
1. Preflight Checks (step-01-preflight) — {✅ if in stepsCompleted, ⬜ otherwise}
2. Generate Pipeline (step-02-generate-pipeline) — {✅ if in stepsCompleted, ⬜ otherwise}
3. Configure Quality Gates (step-03-configure-quality-gates) — {✅ if in stepsCompleted, ⬜ otherwise}
4. Validate & Summary (step-04-validate-and-summary) — {✅ if in stepsCompleted, ⬜ otherwise}"
---
### 4. Route to Next Step
Based on `lastStep`, load the next incomplete step:
- `'step-01-preflight'` → Load `./step-02-generate-pipeline.md`
- `'step-02-generate-pipeline'` → Load `./step-03-configure-quality-gates.md`
- `'step-03-configure-quality-gates'` → Load `./step-04-validate-and-summary.md`
- `'step-04-validate-and-summary'` → **Workflow already complete.** Display: "✅ **All steps completed.** Use **[V] Validate** to review outputs or **[E] Edit** to make revisions." Then halt.
**If `lastStep` does not match any value above**, display: "⚠️ **Unknown progress state** (`lastStep`: {lastStep}). Please use **[C] Create** to start fresh." Then halt.
**Otherwise**, load the identified step file, read completely, and execute.
The existing content in `{outputFile}` provides context from previously completed steps. Use it as reference for remaining steps.
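The routing table reduces to a single `case` dispatch; a minimal sketch reusing `$last_step` from the frontmatter parse above:
```bash
#!/bin/sh
# Sketch: route from the last completed step to the next step file.
case "$last_step" in
  step-01-preflight)               next="./step-02-generate-pipeline.md" ;;
  step-02-generate-pipeline)       next="./step-03-configure-quality-gates.md" ;;
  step-03-configure-quality-gates) next="./step-04-validate-and-summary.md" ;;
  step-04-validate-and-summary)    echo "All steps completed."; exit 0 ;;
  *) echo "Unknown progress state: $last_step; start fresh." >&2; exit 1 ;;
esac
echo "Resuming at $next"
```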
---
## 🚨 SYSTEM SUCCESS/FAILURE METRICS
### ✅ SUCCESS:
- Output document loaded and parsed correctly
- Previously created artifacts verified
- Progress dashboard displayed accurately
- Routed to correct next step
### ❌ SYSTEM FAILURE:
- Not loading output document
- Incorrect progress display
- Routing to wrong step
- Re-executing completed steps
**Master Rule:** Resume MUST route to the exact next incomplete step. Never re-execute completed steps.

View File

@@ -0,0 +1,279 @@
---
name: 'step-02-generate-pipeline'
description: 'Generate CI pipeline configuration with adaptive orchestration (agent-team, subagent, or sequential)'
nextStepFile: './step-03-configure-quality-gates.md'
outputFile: '{test_artifacts}/ci-pipeline-progress.md'
---
# Step 2: Generate CI Pipeline
## STEP GOAL
Create platform-specific CI configuration with test execution, sharding, burn-in, and artifacts.
## MANDATORY EXECUTION RULES
- 📖 Read the entire step file before acting
- ✅ Speak in `{communication_language}`
- ✅ Resolve execution mode from explicit user request first, then config
- ✅ Apply fallback rules deterministically when requested mode is unsupported
---
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 💾 Record outputs before proceeding
- 📖 Load the next step only when instructed
## CONTEXT BOUNDARIES:
- Available context: config, loaded artifacts, and knowledge fragments
- Focus: this step's goal only
- Limits: do not execute future steps
- Dependencies: prior steps' outputs (if any)
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise.
## 0. Resolve Execution Mode (User Override First)
```javascript
const orchestrationContext = {
config: {
execution_mode: config.tea_execution_mode || 'auto', // "auto" | "subagent" | "agent-team" | "sequential"
capability_probe: config.tea_capability_probe !== false, // true by default
},
timestamp: new Date().toISOString().replace(/[:.]/g, '-'),
};
const normalizeUserExecutionMode = (mode) => {
if (typeof mode !== 'string') return null;
const normalized = mode.trim().toLowerCase().replace(/[-_]/g, ' ').replace(/\s+/g, ' ');
if (normalized === 'auto') return 'auto';
if (normalized === 'sequential') return 'sequential';
if (normalized === 'subagent' || normalized === 'sub agent' || normalized === 'subagents' || normalized === 'sub agents') {
return 'subagent';
}
if (normalized === 'agent team' || normalized === 'agent teams' || normalized === 'agentteam') {
return 'agent-team';
}
return null;
};
const normalizeConfigExecutionMode = (mode) => {
  if (mode === 'auto' || mode === 'sequential' || mode === 'subagent' || mode === 'agent-team') {
    return mode;
  }
  return null;
};
// Explicit user instruction in the active run takes priority over config.
const explicitModeFromUser = normalizeUserExecutionMode(runtime.getExplicitExecutionModeHint?.() || null);
const requestedMode = explicitModeFromUser || normalizeConfigExecutionMode(orchestrationContext.config.execution_mode) || 'auto';
const probeEnabled = orchestrationContext.config.capability_probe;
const supports = { subagent: false, agentTeam: false };
if (probeEnabled) {
supports.subagent = runtime.canLaunchSubagents?.() === true;
supports.agentTeam = runtime.canLaunchAgentTeams?.() === true;
}
let resolvedMode = requestedMode;
if (requestedMode === 'auto') {
if (supports.agentTeam) resolvedMode = 'agent-team';
else if (supports.subagent) resolvedMode = 'subagent';
else resolvedMode = 'sequential';
} else if (probeEnabled && requestedMode === 'agent-team' && !supports.agentTeam) {
resolvedMode = supports.subagent ? 'subagent' : 'sequential';
} else if (probeEnabled && requestedMode === 'subagent' && !supports.subagent) {
resolvedMode = 'sequential';
}
```
Resolution precedence:
1. Explicit user request in this run (`agent team` => `agent-team`; `subagent` => `subagent`; `sequential`; `auto`)
2. `tea_execution_mode` from config
3. Runtime capability fallback (when probing enabled)
## 1. Resolve Output Path and Select Template
Determine the pipeline output file path based on the detected `ci_platform`:
| CI Platform | Output Path | Template File |
| ---------------- | ------------------------------------------- | --------------------------------------------------- |
| `github-actions` | `{project-root}/.github/workflows/test.yml` | `{installed_path}/github-actions-template.yaml` |
| `gitlab-ci` | `{project-root}/.gitlab-ci.yml` | `{installed_path}/gitlab-ci-template.yaml` |
| `jenkins` | `{project-root}/Jenkinsfile` | `{installed_path}/jenkins-pipeline-template.groovy` |
| `azure-devops` | `{project-root}/azure-pipelines.yml` | `{installed_path}/azure-pipelines-template.yaml` |
| `harness` | `{project-root}/.harness/pipeline.yaml` | `{installed_path}/harness-pipeline-template.yaml` |
| `circle-ci` | `{project-root}/.circleci/config.yml` | _(no template; generate from first principles)_ |
Use templates from `{installed_path}` when available. Adapt the template to the project's `test_stack_type` and `test_framework`.
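The table maps one-to-one onto a dispatch; a minimal shell sketch:
```bash
#!/bin/sh
# Sketch: resolve the pipeline output path from the detected ci_platform.
case "$ci_platform" in
  github-actions) out=".github/workflows/test.yml" ;;
  gitlab-ci)      out=".gitlab-ci.yml" ;;
  jenkins)        out="Jenkinsfile" ;;
  azure-devops)   out="azure-pipelines.yml" ;;
  harness)        out=".harness/pipeline.yaml" ;;
  circle-ci)      out=".circleci/config.yml" ;;
  *) echo "Unsupported ci_platform: $ci_platform" >&2; exit 1 ;;
esac
mkdir -p "$(dirname "$out")"  # ensure the target directory exists before writing
```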
---
## Security: Script Injection Prevention
> **CRITICAL:** Treat `${{ inputs.* }}` and the entire `${{ github.event.* }}` namespace as unsafe by default. ALWAYS route them through `env:` intermediaries and reference as double-quoted `"$ENV_VAR"` in `run:` blocks. NEVER interpolate them directly.
When the generated pipeline is extended into reusable workflows (`on: workflow_call`), manual dispatch (`on: workflow_dispatch`), or composite actions, these values become user-controllable and can inject arbitrary shell commands.
**Two rules for generated `run:` blocks:**
1. **No direct interpolation** — pass unsafe contexts through `env:`, reference as `"$ENV_VAR"`
2. **Inputs must be DATA, not COMMANDS** — never accept command-shaped inputs (e.g., `inputs.install-command`) that get executed as shell code. Even through `env:`, running `$CMD` where CMD comes from an input is still command injection. Use fixed commands and pass inputs only as arguments.
```yaml
# ✅ SAFE — input is DATA interpolated into a fixed command
- name: Run tests
env:
TEST_GREP: ${{ inputs.test-grep }}
run: |
# Security: inputs passed through env: to prevent script injection
npx playwright test --grep "$TEST_GREP"
# ❌ NEVER — direct GitHub expression injection
- name: Run tests
run: |
npx playwright test --grep "${{ inputs.test-grep }}"
# ❌ NEVER — executing input-derived env var as a command
- name: Install
env:
CMD: ${{ inputs.install-command }}
run: $CMD
```
Include a `# Security: inputs passed through env: to prevent script injection` comment in generated YAML wherever this pattern is applied.
**Safe contexts** (do NOT need `env:` intermediaries): `${{ steps.*.outputs.* }}`, `${{ matrix.* }}`, `${{ runner.os }}`, `${{ github.sha }}`, `${{ github.ref }}`, `${{ secrets.* }}`, `${{ env.* }}`.
---
## 2. Pipeline Stages
Include stages:
- lint
- test (parallel shards)
- contract-test (if `tea_use_pactjs_utils` enabled)
- burn-in (flaky detection)
- report (aggregate + publish)
---
## 3. Test Execution
- Parallel sharding enabled
- CI retries configured
- Capture artifacts (HTML report, JUnit XML, traces/videos on failure)
- Cache dependencies (language-appropriate: node_modules, .venv, .m2, go module cache, NuGet, bundler)
Write the selected pipeline configuration to the resolved output path from step 1. Adjust test commands based on `test_stack_type` and `test_framework`:
- **Frontend/Fullstack**: Include browser install, E2E/component test commands, Playwright/Cypress artifacts
- **Backend (Node.js)**: Use `npm test` or framework-specific commands (`vitest`, `jest`), skip browser install
- **Backend (Python)**: Use `pytest` with coverage (`pytest --cov`), install via `pip install -r requirements.txt` or `poetry install`
- **Backend (Java/Kotlin)**: Use `mvn test` or `gradle test`, cache `.m2/repository` or `.gradle/caches`
- **Backend (Go)**: Use `go test ./...` with coverage (`-coverprofile`), cache Go modules
- **Backend (C#/.NET)**: Use `dotnet test` with coverage, restore NuGet packages
- **Backend (Ruby)**: Use `bundle exec rspec` with coverage, cache `vendor/bundle`
### Contract Testing Pipeline (if `tea_use_pactjs_utils` enabled)
When `tea_use_pactjs_utils` is enabled, add a `contract-test` stage after `test`:
**Required env block** (add to the generated pipeline):
```yaml
env:
PACT_BROKER_BASE_URL: ${{ secrets.PACT_BROKER_BASE_URL }}
PACT_BROKER_TOKEN: ${{ secrets.PACT_BROKER_TOKEN }}
GITHUB_SHA: ${{ github.sha }} # auto-set by GitHub Actions
GITHUB_BRANCH: ${{ github.head_ref || github.ref_name }} # NOT auto-set — must be defined explicitly
```
> **Note:** `GITHUB_SHA` is auto-set by GitHub Actions, but `GITHUB_BRANCH` is **not** — it must be derived from `github.head_ref` (for PRs) or `github.ref_name` (for pushes). The pactjs-utils library reads both from `process.env`.
1. **Consumer test + publish**: Run consumer contract tests, then publish pacts to broker
- `npm run test:pact:consumer`
- `npm run publish:pact`
- Only publish on PR and main branch pushes
2. **Provider verification**: Run provider verification against published pacts
- `npm run test:pact:provider:remote:contract`
- `buildVerifierOptions` auto-reads `PACT_BROKER_BASE_URL`, `PACT_BROKER_TOKEN`, `GITHUB_SHA`, `GITHUB_BRANCH`
- Verification results published to broker when `CI=true`
3. **Can-I-Deploy gate**: Block deployment if contracts are incompatible
- `npm run can:i:deploy:provider`
- Ensure the script adds `--retry-while-unknown 6 --retry-interval 10` for async verification
4. **Webhook job**: Add `repository_dispatch` trigger for `pact_changed` event
- Provider verification runs when consumers publish new pacts
- Ensures compatibility is checked on both consumer and provider changes
5. **Breaking change handling**: When `PACT_BREAKING_CHANGE=true` env var is set:
- Provider test passes `includeMainAndDeployed: false` to `buildVerifierOptions` — verifies only matching branch
- Coordinate with consumer team before removing the flag
6. **Record deployment**: After successful deployment, record version in broker
- `npm run record:provider:deployment --env=production`
Required CI secrets: `PACT_BROKER_BASE_URL`, `PACT_BROKER_TOKEN`
**If `tea_pact_mcp` is `"mcp"`:** Reference the SmartBear MCP `Can I Deploy` and `Matrix` tools for pipeline guidance in `pact-mcp.md`.
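Assuming the npm scripts named above exist in `package.json`, the stage's happy path reduces to this ordering; a minimal sketch (broker credentials come from the required CI secrets):
```bash
#!/bin/sh
set -e
# Sketch: contract-test stage ordering. Consumer first, then provider, then the gate.
npm run test:pact:consumer                  # 1. consumer contract tests
npm run publish:pact                        #    publish pacts (PRs and main pushes only)
npm run test:pact:provider:remote:contract  # 2. provider verification against the broker
npm run can:i:deploy:provider               # 3. gate (script should retry while unknown)
```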
---
### 4. Save Progress
**Save this step's accumulated work to `{outputFile}`.**
- **If `{outputFile}` does not exist** (first save), create it with YAML frontmatter:
```yaml
---
stepsCompleted: ['step-02-generate-pipeline']
lastStep: 'step-02-generate-pipeline'
lastSaved: '{date}'
---
```
Then write this step's output below the frontmatter.
- **If `{outputFile}` already exists**, update:
- Add `'step-02-generate-pipeline'` to `stepsCompleted` array (only if not already present)
- Set `lastStep: 'step-02-generate-pipeline'`
- Set `lastSaved: '{date}'`
- Append this step's output to the appropriate section of the document.
### 5. Orchestration Notes for This Step
For this step, treat these work units as parallelizable when `resolvedMode` is `agent-team` or `subagent`:
- Worker A: resolve platform path/template and produce base pipeline skeleton (section 1)
- Worker B: construct stage definitions and test execution blocks (sections 2-3)
- Worker C: contract-testing block (only when `tea_use_pactjs_utils` is true)
If `resolvedMode` is `sequential`, execute sections 1→4 in order.
Load next step: `{nextStepFile}`
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Step completed in full with required outputs
### ❌ SYSTEM FAILURE:
- Skipped sequence steps or missing outputs
**Master Rule:** Skipping steps is FORBIDDEN.

View File

@@ -0,0 +1,135 @@
---
name: 'step-03-configure-quality-gates'
description: 'Configure burn-in, quality gates, and notifications'
nextStepFile: './step-04-validate-and-summary.md'
knowledgeIndex: '{project-root}/_bmad/tea/testarch/tea-index.csv'
outputFile: '{test_artifacts}/ci-pipeline-progress.md'
---
# Step 3: Quality Gates & Notifications
## STEP GOAL
Configure burn-in loops, quality thresholds, and notification hooks.
## MANDATORY EXECUTION RULES
- 📖 Read the entire step file before acting
- ✅ Speak in `{communication_language}`
---
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 💾 Record outputs before proceeding
- 📖 Load the next step only when instructed
## CONTEXT BOUNDARIES:
- Available context: config, loaded artifacts, and knowledge fragments
- Focus: this step's goal only
- Limits: do not execute future steps
- Dependencies: prior steps' outputs (if any)
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise.
## 1. Burn-In Configuration
Use `{knowledgeIndex}` to load `ci-burn-in.md` guidance:
- Run N-iteration burn-in for flaky detection
- Gate promotion based on burn-in stability
**Stack-conditional burn-in:**
- **Frontend or Fullstack** (`test_stack_type` is `frontend` or `fullstack`): Enable burn-in by default. Burn-in targets UI flakiness (race conditions, selector instability, timing issues).
- **Backend only** (`test_stack_type` is `backend`): Skip burn-in by default. Backend tests (unit, integration, API) are deterministic and rarely exhibit UI-related flakiness. If the user explicitly requests burn-in for backend, honor that override.
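In a generated pipeline this conditional reduces to a simple guard; a minimal sketch, assuming the pipeline exports `TEST_STACK_TYPE` and a hypothetical `FORCE_BURN_IN` override flag:
```bash
#!/bin/sh
# Sketch: skip burn-in for backend-only stacks unless explicitly requested.
if [ "$TEST_STACK_TYPE" = "backend" ] && [ "${FORCE_BURN_IN:-false}" != "true" ]; then
  echo "Backend-only stack: skipping burn-in."
  exit 0
fi
echo "Running burn-in for $TEST_STACK_TYPE stack"
```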
**Security: Script injection prevention for reusable burn-in workflows:**
When burn-in is extracted into a reusable workflow (`on: workflow_call`), all `${{ inputs.* }}` values MUST be passed through `env:` intermediaries and referenced as quoted `"$ENV_VAR"`. Never interpolate them directly.
**Inputs must be DATA, not COMMANDS.** Do not accept command-shaped inputs (e.g., `inputs.install-command`, `inputs.test-command`) that get executed as shell code — even through `env:`, running `$CMD` is still command injection. Use fixed commands (e.g., `npm ci`, `npx playwright test`) and pass inputs only as data arguments.
```yaml
# ✅ SAFE — fixed commands with data-only inputs
- name: Install dependencies
run: npm ci
- name: Run burn-in loop
env:
TEST_GREP: ${{ inputs.test-grep }}
BURN_IN_COUNT: ${{ inputs.burn-in-count }}
BASE_REF: ${{ inputs.base-ref }}
run: |
# Security: inputs passed through env: to prevent script injection
for i in $(seq 1 "$BURN_IN_COUNT"); do
echo "Burn-in iteration $i/$BURN_IN_COUNT"
npx playwright test --grep "$TEST_GREP" || exit 1
done
```
---
## 2. Quality Gates
Define:
- Minimum pass rates (P0 = 100%, P1 ≥ 95%)
- Fail CI on critical test failures
- Optional: require traceability or nfr-assess output before release
**Contract testing gate** (if `tea_use_pactjs_utils` is enabled):
- **can-i-deploy must pass** before any deployment to staging or production
- Block the deployment pipeline if contract verification fails
- Treat consumer pact publishing failures as CI failures (contracts must stay up-to-date)
- Provider verification must pass for all consumer pacts before merge
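One way to enforce these thresholds is a small gate over the aggregated results. A hedged sketch, assuming a hypothetical `results.json` summary with per-priority counts (your reporter's actual output will differ):
```bash
#!/bin/sh
# Sketch: fail CI when priority pass rates fall below the gate thresholds.
# Assumes a hypothetical results.json such as:
#   {"P0": {"passed": 42, "total": 42}, "P1": {"passed": 97, "total": 100}}
p0_rate=$(jq '.P0.passed / .P0.total * 100' results.json)
p1_rate=$(jq '.P1.passed / .P1.total * 100' results.json)
awk -v p0="$p0_rate" -v p1="$p1_rate" 'BEGIN {
  if (p0 < 100) { print "Gate failed: P0 pass rate " p0 "% (requires 100%)"; exit 1 }
  if (p1 < 95)  { print "Gate failed: P1 pass rate " p1 "% (requires >= 95%)"; exit 1 }
  print "Quality gates passed"
}'
```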
---
## 3. Notifications
Configure:
- Failure notifications (Slack/email)
- Artifact links
---
### 4. Save Progress
**Save this step's accumulated work to `{outputFile}`.**
- **If `{outputFile}` does not exist** (first save), create it with YAML frontmatter:
```yaml
---
stepsCompleted: ['step-03-configure-quality-gates']
lastStep: 'step-03-configure-quality-gates'
lastSaved: '{date}'
---
```
Then write this step's output below the frontmatter.
- **If `{outputFile}` already exists**, update:
- Add `'step-03-configure-quality-gates'` to `stepsCompleted` array (only if not already present)
- Set `lastStep: 'step-03-configure-quality-gates'`
- Set `lastSaved: '{date}'`
- Append this step's output to the appropriate section of the document.
Load next step: `{nextStepFile}`
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Step completed in full with required outputs
### ❌ SYSTEM FAILURE:
- Skipped sequence steps or missing outputs
**Master Rule:** Skipping steps is FORBIDDEN.

View File

@@ -0,0 +1,92 @@
---
name: 'step-04-validate-and-summary'
description: 'Validate pipeline and summarize'
outputFile: '{test_artifacts}/ci-pipeline-progress.md'
---
# Step 4: Validate & Summarize
## STEP GOAL
Validate CI configuration and report completion details.
## MANDATORY EXECUTION RULES
- 📖 Read the entire step file before acting
- ✅ Speak in `{communication_language}`
---
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 💾 Record outputs before proceeding
- 📖 Load the next step only when instructed
## CONTEXT BOUNDARIES:
- Available context: config, loaded artifacts, and knowledge fragments
- Focus: this step's goal only
- Limits: do not execute future steps
- Dependencies: prior steps' outputs (if any)
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly. Do not skip, reorder, or improvise.
## 1. Validation
Validate against `checklist.md`:
- Config file created
- Stages and sharding configured
- Burn-in and artifacts enabled
- Secrets/variables documented
Fix gaps before completion.
---
## 2. Completion Summary
Report:
- CI platform and config path
- Key stages enabled
- Artifacts and notifications
- Next steps (set secrets, run pipeline)
---
## 3. Save Progress
**Save this step's accumulated work to `{outputFile}`.**
- **If `{outputFile}` does not exist** (first save), create it with YAML frontmatter:
```yaml
---
stepsCompleted: ['step-04-validate-and-summary']
lastStep: 'step-04-validate-and-summary'
lastSaved: '{date}'
---
```
Then write this step's output below the frontmatter.
- **If `{outputFile}` already exists**, update:
- Add `'step-04-validate-and-summary'` to `stepsCompleted` array (only if not already present)
- Set `lastStep: 'step-04-validate-and-summary'`
- Set `lastSaved: '{date}'`
- Append this step's output to the appropriate section of the document.
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Step completed in full with required outputs
### ❌ SYSTEM FAILURE:
- Skipped sequence steps or missing outputs
**Master Rule:** Skipping steps is FORBIDDEN.

View File

@@ -0,0 +1,65 @@
---
name: 'step-01-assess'
description: 'Load an existing output for editing'
nextStepFile: './step-02-apply-edit.md'
---
# Step 1: Assess Edit Target
## STEP GOAL:
Identify which output should be edited and load it.
## MANDATORY EXECUTION RULES (READ FIRST):
### Universal Rules:
- 📖 Read the complete step file before taking any action
- ✅ Speak in `{communication_language}`
### Role Reinforcement:
- ✅ You are the Master Test Architect
### Step-Specific Rules:
- 🎯 Ask the user which output file to edit
- 🚫 Do not edit until target is confirmed
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
## CONTEXT BOUNDARIES:
- Available context: existing outputs
- Focus: select edit target
- Limits: no edits yet
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly.
### 1. Identify Target
Ask the user to provide the output file path or select from known outputs.
### 2. Load Target
Read the provided output file in full.
### 3. Confirm
Confirm the target and proceed to edit.
Load next step: `{nextStepFile}`
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Target identified and loaded
### ❌ SYSTEM FAILURE:
- Proceeding without a confirmed target

View File

@@ -0,0 +1,60 @@
---
name: 'step-02-apply-edit'
description: 'Apply edits to the selected output'
---
# Step 2: Apply Edits
## STEP GOAL:
Apply the requested edits to the selected output and confirm changes.
## MANDATORY EXECUTION RULES (READ FIRST):
### Universal Rules:
- 📖 Read the complete step file before taking any action
- ✅ Speak in `{communication_language}`
### Role Reinforcement:
- ✅ You are the Master Test Architect
### Step-Specific Rules:
- 🎯 Only apply edits explicitly requested by the user
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
## CONTEXT BOUNDARIES:
- Available context: selected output and user changes
- Focus: apply edits only
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly.
### 1. Confirm Requested Changes
Restate what will be changed and confirm.
### 2. Apply Changes
Update the output file accordingly.
### 3. Report
Summarize the edits applied.
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Changes applied and confirmed
### ❌ SYSTEM FAILURE:
- Unconfirmed edits or missing update

View File

@@ -0,0 +1,81 @@
---
name: 'step-01-validate'
description: 'Validate workflow outputs against checklist'
outputFile: '{test_artifacts}/ci-validation-report.md'
validationChecklist: '../checklist.md'
---
# Step 1: Validate Outputs
## STEP GOAL:
Validate outputs using the workflow checklist and record findings.
## MANDATORY EXECUTION RULES (READ FIRST):
### Universal Rules:
- 📖 Read the complete step file before taking any action
- ✅ Speak in `{communication_language}`
### Role Reinforcement:
- ✅ You are the Master Test Architect
### Step-Specific Rules:
- 🎯 Validate against `{validationChecklist}`
- 🚫 Do not skip checks
## EXECUTION PROTOCOLS:
- 🎯 Follow the MANDATORY SEQUENCE exactly
- 💾 Write findings to `{outputFile}`
## CONTEXT BOUNDARIES:
- Available context: workflow outputs and checklist
- Focus: validation only
- Limits: do not modify outputs in this step
## MANDATORY SEQUENCE
**CRITICAL:** Follow this sequence exactly.
### 1. Load Checklist
Read `{validationChecklist}` and list all criteria.
### 2. Validate Outputs
Evaluate outputs against each checklist item.
### 2a. Script Injection Scan
Scan all generated YAML workflow files for unsafe interpolation patterns inside `run:` blocks.
**Unsafe patterns to flag (FAIL):**
- `${{ inputs.* }}` — all workflow inputs are user-controllable
- `${{ github.event.* }}` — treat the entire event namespace as unsafe by default (includes PR titles, issue bodies, comment bodies, label names, etc.)
- `${{ github.head_ref }}` — PR source branch name (user-controlled)
**Detection method:** For each `run:` block in generated YAML, check if any of the above expressions appears in the run script body. If found, flag as **FAIL** with the exact line and recommend converting to the safe `env:` intermediary pattern (pass through `env:`, reference as double-quoted `"$ENV_VAR"`).
**Safe patterns to ignore** (exempt from flagging): `${{ steps.*.outputs.* }}`, `${{ matrix.* }}`, `${{ runner.os }}`, `${{ github.sha }}`, `${{ github.ref }}`, `${{ secrets.* }}`, `${{ env.* }}` — these are safe from GitHub expression injection when used in `run:` blocks.
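A rough grep-based approximation of this scan can serve as a starting point (it matches the expressions anywhere in a file rather than only inside `run:` blocks, so hits still need manual confirmation against their context):
```bash
# Approximate injection scan over generated workflow files
grep -nE '\$\{\{[[:space:]]*(inputs\.|github\.event\.|github\.head_ref)' \
  .github/workflows/*.yml \
  && echo "Review flagged lines against their run: context" \
  || echo "No unsafe interpolation patterns found"
```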
### 3. Write Report
Write a validation report to `{outputFile}` with PASS/WARN/FAIL per section.
## 🚨 SYSTEM SUCCESS/FAILURE METRICS:
### ✅ SUCCESS:
- Validation report written
- All checklist items evaluated
### ❌ SYSTEM FAILURE:
- Skipped checklist items
- No report produced

View File

@@ -0,0 +1,72 @@
---
validationDate: 2026-01-27
workflowName: testarch-ci
workflowPath: {project-root}/src/workflows/testarch/ci
validationStatus: COMPLETE
completionDate: 2026-01-27 10:03:10
---
# Validation Report: testarch-ci
**Validation Started:** 2026-01-27 09:50:21
**Validator:** BMAD Workflow Validation System (Codex)
**Standards Version:** BMAD Workflow Standards
## File Structure & Size
- workflow.md present: YES
- instructions.md present: YES
- workflow.yaml present: YES
- step files found: 7
**Step File Sizes:**
- steps-c/step-01-preflight.md: 87 lines [GOOD]
- steps-c/step-02-generate-pipeline.md: 75 lines [GOOD]
- steps-c/step-03-configure-quality-gates.md: 67 lines [GOOD]
- steps-c/step-04-validate-and-summary.md: 60 lines [GOOD]
- steps-e/step-01-assess.md: 51 lines [GOOD]
- steps-e/step-02-apply-edit.md: 46 lines [GOOD]
- steps-v/step-01-validate.md: 53 lines [GOOD]
- workflow-plan.md present: YES
## Frontmatter Validation
- No frontmatter violations found
## Critical Path Violations
- No {project-root} hardcoded paths detected in body
- No dead relative links detected
## Menu Handling Validation
- No menu structures detected (linear step flow) [N/A]
## Step Type Validation
- Last step steps-v/step-01-validate.md has no nextStepFile (final step OK)
- Step type validation assumes a linear sequence (no branching/menu); workflow-plan.md is present for reference. [INFO]
## Output Format Validation
- No templates found in workflow root
- Steps with outputFile in frontmatter:
- steps-c/step-02-generate-pipeline.md
- steps-v/step-01-validate.md
## Validation Design Check
- checklist.md present: YES
- Validation steps folder (steps-v) present: YES
## Instruction Style Check
- All steps include STEP GOAL, MANDATORY EXECUTION RULES, EXECUTION PROTOCOLS, CONTEXT BOUNDARIES, and SUCCESS/FAILURE metrics
## Summary
- Validation completed: 2026-01-27 10:03:10
- Critical issues: 0
- Warnings: 0 (informational notes only)
- Readiness: READY (manual review optional)

View File

@@ -0,0 +1,114 @@
---
validationDate: 2026-01-27
workflowName: testarch-ci
workflowPath: {project-root}/src/workflows/testarch/ci
validationStatus: COMPLETE
completionDate: 2026-01-27 10:24:01
---
# Validation Report: testarch-ci
**Validation Started:** 2026-01-27 10:24:01
**Validator:** BMAD Workflow Validation System (Codex)
**Standards Version:** BMAD Workflow Standards
## File Structure & Size
- workflow.md present: YES
- instructions.md present: YES
- workflow.yaml present: YES
- step files found: 7
**Step File Sizes:**
- steps-c/step-01-preflight.md: 86 lines [GOOD]
- steps-c/step-02-generate-pipeline.md: 74 lines [GOOD]
- steps-c/step-03-configure-quality-gates.md: 66 lines [GOOD]
- steps-c/step-04-validate-and-summary.md: 59 lines [GOOD]
- steps-e/step-01-assess.md: 50 lines [GOOD]
- steps-e/step-02-apply-edit.md: 45 lines [GOOD]
- steps-v/step-01-validate.md: 52 lines [GOOD]
- workflow-plan.md present: YES
## Frontmatter Validation
- No frontmatter violations found
## Critical Path Violations
### Config Variables (Exceptions)
Standard BMAD config variables treated as valid exceptions: bmb_creations_output_folder, communication_language, document_output_language, output_folder, planning_artifacts, project-root, project_name, test_artifacts, user_name
- No {project-root} hardcoded paths detected in body
- No dead relative links detected
- No module path assumptions detected
**Status:** ✅ PASS - No critical violations
## Menu Handling Validation
- No menu structures detected (linear step flow) [N/A]
## Step Type Validation
- steps-c/step-01-preflight.md: Init [PASS]
- steps-c/step-02-generate-pipeline.md: Middle [PASS]
- steps-c/step-03-configure-quality-gates.md: Middle [PASS]
- steps-c/step-04-validate-and-summary.md: Final [PASS]
- Step type validation assumes a linear sequence (no branching/menu); workflow-plan.md is present for reference. [INFO]
## Output Format Validation
- Templates present: NONE
- Steps with outputFile in frontmatter:
- steps-c/step-02-generate-pipeline.md
- steps-v/step-01-validate.md
- checklist.md present: YES
## Validation Design Check
- Validation steps folder (steps-v) present: YES
- Validation step(s) present: step-01-validate.md
- Validation steps reference checklist data and auto-proceed
## Instruction Style Check
- Instruction style: Prescriptive (appropriate for TEA quality/compliance workflows)
- Steps emphasize mandatory sequence, explicit success/failure metrics, and risk-based guidance
## Collaborative Experience Check
- Overall facilitation quality: GOOD
- Steps use progressive prompts and clear role reinforcement; no laundry-list interrogation detected
- Flow progression is clear and aligned to workflow goals
## Subagent Optimization Opportunities
- No high-priority subagent optimizations identified; workflow already uses step-file architecture
- Pattern 1 (grep/regex): N/A for most steps
- Pattern 2 (per-file analysis): already aligned to validation structure
- Pattern 3 (data ops): minimal data file loads
- Pattern 4 (parallel): optional for validation only
## Cohesive Review
- Overall assessment: GOOD
- Flow is linear, goals are clear, and outputs map to TEA artifacts
- Voice and tone consistent with Test Architect persona
- Recommendation: READY (minor refinements optional)
## Plan Quality Validation
- Plan file present: workflow-plan.md
- Planned steps found: 7 (all implemented)
- Plan implementation status: Fully Implemented
## Summary
- Validation completed: 2026-01-27 10:24:01
- Critical issues: 0
- Warnings: 0 (informational notes only)
- Readiness: READY (manual review optional)

View File

@@ -0,0 +1,20 @@
# Workflow Plan: testarch-ci
## Create Mode (steps-c)
- step-01-preflight.md
- step-02-generate-pipeline.md
- step-03-configure-quality-gates.md
- step-04-validate-and-summary.md
## Validate Mode (steps-v)
- step-01-validate.md
## Edit Mode (steps-e)
- step-01-assess.md
- step-02-apply-edit.md
## Outputs
- CI config (e.g., {project-root}/.github/workflows/test.yml)
- Pipeline guidance and artifacts configuration

View File

@@ -0,0 +1,41 @@
---
name: testarch-ci
description: Scaffold CI/CD quality pipeline with test execution. Use when the user says 'let's set up CI pipeline' or 'I want to create quality gates'
web_bundle: true
---
# CI/CD Pipeline Setup
**Goal:** Scaffold CI/CD quality pipeline with test execution, burn-in loops, and artifact collection
**Role:** You are the Master Test Architect.
---
## WORKFLOW ARCHITECTURE
This workflow uses **tri-modal step-file architecture**:
- **Create mode (steps-c/)**: primary execution flow
- **Validate mode (steps-v/)**: validation against checklist
- **Edit mode (steps-e/)**: revise existing outputs
---
## INITIALIZATION SEQUENCE
### 1. Mode Determination
"Welcome to the workflow. What would you like to do?"
- **[C] Create** — Run the workflow
- **[R] Resume** — Resume an interrupted workflow
- **[V] Validate** — Validate existing outputs
- **[E] Edit** — Edit existing outputs
### 2. Route to First Step
- **If C:** Load `steps-c/step-01-preflight.md`
- **If R:** Load `steps-c/step-01b-resume.md`
- **If V:** Load `steps-v/step-01-validate.md`
- **If E:** Load `steps-e/step-01-assess.md`

View File

@@ -0,0 +1,48 @@
# Test Architect workflow: ci
name: testarch-ci
# prettier-ignore
description: 'Scaffold CI/CD quality pipeline with test execution. Use when the user says "let''s set up CI pipeline" or "I want to create quality gates"'
# Critical variables from config
config_source: "{project-root}/_bmad/tea/config.yaml"
output_folder: "{config_source}:output_folder"
test_artifacts: "{config_source}:test_artifacts"
user_name: "{config_source}:user_name"
communication_language: "{config_source}:communication_language"
document_output_language: "{config_source}:document_output_language"
date: system-generated
# Workflow components
installed_path: "{project-root}/_bmad/tea/workflows/testarch/ci"
instructions: "{installed_path}/instructions.md"
validation: "{installed_path}/checklist.md"
# Variables and inputs
variables:
ci_platform: "auto" # auto, github-actions, gitlab-ci, circle-ci, jenkins, azure-devops, harness - user can override
test_dir: "{project-root}/tests" # Root test directory
test_stack_type: "auto" # auto, frontend, backend, fullstack - detected or user override
test_framework: "auto" # auto, playwright, cypress, jest, vitest - detected or user override
# Output configuration (resolved dynamically based on ci_platform detection)
default_output_file: "{project-root}/.github/workflows/test.yml" # GitHub Actions default; overridden per platform
# Required tools
required_tools:
- read_file # Read .nvmrc, package.json, framework config
- write_file # Create CI config, scripts, documentation
- create_directory # Create .github/workflows/ or .gitlab-ci/ directories
- list_files # Detect existing CI configuration
- search_repo # Find test files for selective testing
tags:
- qa
- ci-cd
- test-architect
- pipeline
- automation
execution_hints:
interactive: false # Minimize prompts, auto-detect when possible
autonomous: true # Proceed without user input unless blocked
iterative: true
