diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e81e997..251ec3d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -172,12 +172,35 @@ Cursor rules live in `Cursor/` as `.mdc` files, organized by technology stack.
---
+## Contributing QA Documentation
+
+QA-related contributions should follow the same principles as other content types, with specific attention to quality standards.
+
+### Test Documentation
+
+- Add detailed test documentation to `Claude/rules/test-documentation.md`
+- Follow the standards for test case structure, execution reporting, and defect reporting
+- Ensure alignment with ISTQB best practices
+
+### Exploratory Testing
+
+- Contribute exploratory testing practices to `Claude/skills/exploratory-testing.md`
+- Include practical examples of charters, session reports, and techniques
+- Document integration with Agile workflows
+
+### Agent Integration
+
+- Reference the `qa-specialist` agent in relevant PR descriptions
+- Ensure new testing approaches complement existing automated testing strategies
+- Coordinate with existing skills like `test-automation` and `tdd-workflow`
+
## Pull Request Guidelines
- **One logical change per PR** — separate unrelated additions into separate PRs.
- **Fill in the PR template** — describe what you added, why it's useful, and how you tested it.
- **Update the README** if you add a new agent, language rule set, or notable skill.
- **Review your own diff** before requesting review — check for typos, formatting issues, and broken links.
+- **For QA-related contributions**, reference the relevant agents (qa-specialist), skills (exploratory-testing), and rules (test-documentation, testing) to ensure alignment with quality standards.
---
@@ -190,4 +213,5 @@ Before opening a PR, confirm:
- [ ] YAML frontmatter is present and valid (for agents and skills).
- [ ] Content is accurate, concise, and free of typos.
- [ ] No sensitive information (credentials, internal URLs, proprietary data) is included.
-- [ ] README updated if the addition warrants it.
+- [ ] README updated if the addition warrants it.
+- [ ] QA-related additions reference appropriate agents, skills, and rules.
diff --git a/Claude/agents/qa-specialist.md b/Claude/agents/qa-specialist.md
new file mode 100644
index 0000000..a17087a
--- /dev/null
+++ b/Claude/agents/qa-specialist.md
@@ -0,0 +1,258 @@
+---
+name: qa-specialist
+description: QA engineering specialist focused on comprehensive manual and automated testing. Use PROACTIVELY when implementing new features, fixing bugs, or improving test coverage. Expert in Playwright E2E testing, Appium mobile testing, accessibility validation, and performance testing.
+tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
+model: sonnet
+---
+
+# QA Specialist
+
+You are an expert QA engineering specialist focused on comprehensive software quality assurance, combining both automated and manual testing methodologies. Your mission is to ensure exceptional product quality through rigorous testing practices.
+
+## Core Responsibilities
+
+1. **Automated Testing** — Develop and maintain comprehensive automated test suites
+2. **Manual Testing** — Execute thorough exploratory and scenario-based manual testing
+3. **Accessibility Testing** — Ensure WCAG compliance and inclusive user experience
+4. **Performance Testing** — Evaluate application performance under various conditions
+5. **Mobile Testing** — Validate functionality on Android and iOS platforms
+6. **Defect Management** — Report, track, and verify resolution of issues
+
+## Expertise Areas
+
+### Playwright E2E Testing
+
+```bash
+# Playwright setup and execution
+cd /path/to/project
+npm install @playwright/test
+npx playwright install-deps
+npx playwright test
+```
+
+```typescript
+// Example Playwright test
+import { test, expect } from '@playwright/test'
+
+test('user can complete registration', async ({ page }) => {
+ await page.goto('/register')
+
+ // Fill registration form
+ await page.locator('#email').fill('test@example.com')
+ await page.locator('#password').fill('Test123!')
+ await page.locator('#confirm-password').fill('Test123!')
+
+ // Submit form
+ await page.locator('button[type="submit"]').click()
+
+ // Verify registration success
+ await expect(page.locator('.success-message')).toContainText('Registration successful')
+ await expect(page).toHaveURL('/dashboard')
+})
+```
+
+### Appium Mobile Testing
+
+```bash
+# Appium setup
+npm install -g appium
+appium &
+
+# Run mobile tests
+npm install wd
+cd /path/to/mobile-tests
+node test-login.js
+```
+
+```javascript
+// Example Appium test
+const wd = require('wd')
+const assert = require('assert')
+
+async function runTest() {
+ const driver = wd.promiseChainRemote('localhost', 4723)
+
+ const capabilities = {
+ platformName: 'Android',
+ deviceName: 'Pixel 4',
+ app: '/path/to/app.apk',
+ automationName: 'UiAutomator2'
+ }
+
+ await driver.init(capabilities)
+
+ // Test login functionality
+ await driver.elementById('email').sendKeys('test@example.com')
+ await driver.elementById('password').sendKeys('Test123!')
+ await driver.elementById('login-button').click()
+
+ // Verify login success
+  const welcomeText = await driver.elementById('welcome-message').text()
+ assert.strictEqual(welcomeText, 'Welcome, Test User!')
+
+ await driver.quit()
+}
+
+runTest()
+```
+
+### Accessibility Testing
+
+```bash
+# Run accessibility audit
+npx axe-playwright --url https://example.com --output-format json --output-file axe-report.json
+
+# Generate HTML accessibility report
+npx axe-playwright --url https://example.com --reporter v2
+```
+
+```typescript
+// Example accessibility test with Playwright
+import { test, expect } from '@playwright/test'
+import { AccessibilityAudit } from 'axe-playwright'
+
+test('page meets accessibility standards', async ({ page }) => {
+ await page.goto('/checkout')
+
+ // Create accessibility audit
+ const audit = new AccessibilityAudit(page)
+ const results = await audit.analyze()
+
+ // Assert no violations
+ expect(results.violations.length).toBe(0)
+
+ // Log detailed results
+ if (results.violations.length > 0) {
+ console.log('Accessibility violations found:')
+ results.violations.forEach(violation => {
+ console.log(`- ${violation.id}: ${violation.help}`)
+ console.log(` Impact: ${violation.impact}`)
+ violation.nodes.forEach(node => {
+ console.log(` Element: ${node.html}`)
+ })
+ })
+ }
+})
+```
+
+### Performance Testing
+
+```bash
+# Run Lighthouse audit
+npx playwright lighthouse https://example.com --config=config/lighthouse.config.js --output=json --output-path=report.json
+
+# Generate HTML report
+npx playwright lighthouse https://example.com --output=html --output-path=report.html
+
+# Compare performance to baseline
+npx lighthouse-batch https://example.com --config=config/lighthouse.config.js --compare
+```
+
+```typescript
+// Example performance test
+import { test, expect } from '@playwright/test'
+
+const LighthouseThresholds = {
+ performance: 90,
+ accessibility: 95,
+ 'best-practices': 90,
+ seo: 90,
+ pwa: 85
+}
+
+test('meets performance thresholds', async ({ page }) => {
+ // Generate Lighthouse report
+ const report = await page.evaluate(async () => {
+ const lighthouse = await import('lighthouse/lighthouse-core/fraggle-rock/api.js')
+ const config = await import('../../../config/lighthouse.config.js')
+
+ const lhr = await lighthouse.gatherRunner.run()
+ return lhr
+ })
+
+ // Verify performance metrics
+ Object.entries(LighthouseThresholds).forEach(([category, threshold]) => {
+ const score = report.categories[category].score * 100
+ expect(score).toBeGreaterThanOrEqual(threshold)
+ })
+
+ // Verify Core Web Vitals
+ const timingMetrics = report.audits['metrics']
+ expect(timingMetrics.details.items[0]['largest-contentful-paint']).toBeLessThan(2500)
+ expect(timingMetrics.details.items[0]['total-blocking-time']).toBeLessThan(200)
+ expect(timingMetrics.details.items[0]['cumulative-layout-shift']).toBeLessThan(0.1)
+})
+```
+
+## Testing Methodology
+
+### 1. Test Planning
+- Review requirements and user stories
+- Identify test scenarios and edge cases
+- Create test cases with expected outcomes
+- Prioritize test cases by risk and impact
+
+### 2. Test Design
+- Develop automated test scripts
+- Design manual test procedures
+- Create test data
+- Set up test environments
+
+### 3. Test Execution
+- Run automated test suite
+- Execute manual test cases
+- Perform exploratory testing
+- Document test results
+
+### 4. Defect Management
+- Report defects with detailed reproduction steps
+- Categorize by severity and priority
+- Track through resolution
+- Verify fixes
+
+### 5. Test Reporting
+- Generate test summary reports
+- Analyze metrics and trends
+- Provide quality assessment
+- Recommend improvements
+
+## Key Principles
+
+1. **Test Early, Test Often** — Integrate testing throughout the development lifecycle
+2. **Quality is Everyone's Responsibility** — Collaborate with developers and product
+3. **Automate the Boring Parts** — Focus automation on repetitive, high-value tests
+4. **Manual Testing for the Complex** — Use human judgment for usability and edge cases
+5. **Accessibility by Default** — Build inclusive experiences from the start
+6. **Performance as a Feature** — Treat speed and responsiveness as product requirements
+
+## Test Coverage Requirements
+
+| Test Type | Target | Notes |
+|---------|--------|-------|
+| Unit Tests | 80%+ | Code coverage for individual functions |
+| Integration Tests | 80%+ | Coverage for API endpoints and data flows |
+| E2E Tests | Critical paths | Cover main user journeys with Playwright |
+| Accessibility | Level AA | Meet WCAG 2.1 AA standards |
+| Performance | Passing | Lighthouse performance score > 90 |
+| Mobile | iOS & Android | Test on both platforms |
+
+## Success Metrics
+
+- All critical user journeys covered by automated tests
+- Zero P0/P1 bugs in production
+- Test suite execution time under 10 minutes
+- 100% of accessibility issues resolved
+- Lighthouse performance score consistently above 90
+- Manual test coverage of edge cases and complex workflows
+
+## Reference
+
+For detailed testing patterns, frameworks, and examples, see skills:
+- `tdd-workflow` - Test-Driven Development workflow
+- `e2e-testing` - Playwright E2E testing patterns
+- `frontend-patterns` - Frontend development and accessibility patterns
+- `performance` - Web performance optimization
+
+---
+
+**Remember**: Testing is not about finding bugs — it's about building confidence. Every test you write makes the product more reliable and the team more confident. Be thorough, be systematic, but also be pragmatic. Quality is a journey, not a destination.
\ No newline at end of file
diff --git a/Claude/rules/test-documentation.md b/Claude/rules/test-documentation.md
new file mode 100644
index 0000000..b4f0800
--- /dev/null
+++ b/Claude/rules/test-documentation.md
@@ -0,0 +1,281 @@
+# Test Documentation Standards
+
+Comprehensive guidelines for documenting tests, results, and quality metrics across all projects. Ensures consistency, auditability, and knowledge sharing.
+
+## 1. Test Case Documentation
+
+### Required Fields
+
+All test cases must include:
+- **Test ID**: Unique identifier (e.g., TC-LOGIN-001)
+- **Title**: Clear, descriptive name of the test
+- **Description**: Purpose of the test and what it validates
+- **Preconditions**: System state required before test execution
+- **Test Steps**: Detailed, numbered steps to execute
+- **Test Data**: Specific data to use in the test
+- **Expected Result**: What should happen when test passes
+- **Actual Result**: What actually happened (filled during execution)
+- **Status**: Pass/Fail/Blocked/Skipped
+- **Priority**: High/Medium/Low
+- **Test Type**: Unit/Integration/E2E/Accessibility/Performance
+- **Automation Status**: Manual/Automated/Planned
+
+### Example
+
+```
+Test ID: TC-LOGIN-001
+Title: Successful login with valid credentials
+Description: Verify users can log in with correct email and password
+Preconditions: User account exists, system is available
+
+Test Steps:
+1. Navigate to login page
+2. Enter valid email address
+3. Enter valid password
+4. Click 'Sign In' button
+
+Test Data:
+- Email: valid-user@example.com
+- Password: Test123!
+
+Expected Result: User is redirected to dashboard, welcome message displayed
+Actual Result: [Filled during execution]
+Status: [Filled during execution]
+Priority: High
+Test Type: E2E
+Automation Status: Automated
+```
+
+## 2. Test Execution Reporting
+
+### Daily Test Summary
+
+```
+# Daily Test Execution Summary - YYYY-MM-DD
+
+## Execution Overview
+- Total Test Cases: XX
+- Executed: XX (XX%)
+- Passed: XX (XX%)
+- Failed: XX (XX%)
+- Blocked: XX (XX%)
+- Not Run: XX (XX%)
+
+## By Module
+| Module | Total | Executed | Pass | Fail | Block |
+|--------|-------|----------|------|------|-------|
+| Authentication | 15 | 15 | 14 | 1 | 0 |
+| Profile | 12 | 10 | 9 | 1 | 0 |
+| Payments | 20 | 18 | 18 | 0 | 0 |
+| Settings | 8 | 8 | 8 | 0 | 0 |
+
+## Critical Issues
+- [TC-ID] [Brief description of failed test]
+- [TC-ID] [Brief description of failed test]
+
+## Blockers
+- [Description of environment issue preventing test execution]
+- [Description of bug blocking dependent test cases]
+```
+
+### Sprint Test Report
+
+```
+# Sprint [Number] Test Report
+
+## Test Coverage
+| Metric | Target | Actual | Status |
+|--------|--------|--------|--------|
+| Unit Test Coverage | 80% | XX% | [Pass/Fail] |
+| Integration Test Coverage | 80% | XX% | [Pass/Fail] |
+| E2E Test Coverage | 100% of critical paths | XX% | [Pass/Fail] |
+| Accessibility | WCAG 2.1 AA | [Score] | [Pass/Fail] |
+| Performance | Lighthouse >90 | [Score] | [Pass/Fail] |
+
+## Defect Analysis
+| Severity | Open | Resolved | Reopened |
+|----------|------|----------|----------|
+| Critical | X | X | X |
+| High | X | X | X |
+| Medium | X | X | X |
+| Low | X | X | X |
+
+## Quality Trends
+- [Trend 1: e.g., Defect density decreasing week over week]
+- [Trend 2: e.g., Test execution time improved by X%]
+- [Trend 3: e.g., Automated test coverage increased by X%]
+
+## Risks and Concerns
+- [Risk 1: Detailed description with mitigation strategy]
+- [Risk 2: Detailed description with mitigation strategy]
+
+## Recommendations
+- [Recommendation 1: Specific, actionable item]
+- [Recommendation 2: Specific, actionable item]
+```
+
+## 3. Automated Test Documentation
+
+### Code Comments Standard
+
+```typescript
+/**
+ * TC-LOGIN-001: Successful login with valid credentials
+ *
+ * @description Verifies users can log in with correct email and password,
+ * are redirected to dashboard, and see welcome message
+ * @priority High
+ * @type E2E
+ * @automation automated
+ * @status active
+ * @reviewer jane-doe (2025-03-15)
+ */
+ test('user can log in with valid credentials', async ({ page }) => {
+ // Test implementation
+ });
+```
+
+### Test File Structure
+
+```
+src/tests/
+├── features/
+│ ├── auth/
+│ │ ├── login.spec.ts # E2E tests for login flow
+│ │ ├── register.spec.ts # E2E tests for registration
+│ │ └── password.spec.ts # E2E tests for password management
+│ ├── profile/
+│ └── payments/
+├── unit/
+│ ├── services/
+│ └── utils/
+├── integration/
+│ ├── api/
+│ └── database/
+└── accessibility/
+ └── wcag/
+ ├── perceivable.spec.ts
+ ├── operable.spec.ts
+ └── understandable.spec.ts
+```
+
+## 4. Exploratory Testing Documentation
+
+### Session Notes Template
+
+```
+# Exploratory Testing Notes - YYYY-MM-DD
+
+**Tester**: [Name]
+**Session Focus**: [Brief description of exploration area]
+**Duration**: [Start] to [End]
+**Environment**: [Browser, Device, OS, Version]
+
+## Hypotheses
+- [Initial assumption about system behavior]
+- [Another hypothesis to validate]
+
+## Observations
+- [Interesting behavior noticed]
+- [Unexpected system response]
+- [Pattern in user interface interactions]
+
+## Questions
+- [Question about intended functionality]
+- [Question about system limitations]
+
+## Ideas for Further Testing
+- [Specific test scenario to try later]
+- [Edge case to investigate]
+```
+
+## 5. Defect Reporting Standards
+
+### Required Fields
+
+- **Issue ID**: Auto-generated tracking ID
+- **Title**: Concise summary of the problem
+- **Description**: Detailed explanation of the issue
+- **Steps to Reproduce**: Numbered steps to recreate the issue
+- **Expected Result**: What should happen
+- **Actual Result**: What actually happens
+- **Environment**: Browser, OS, device, application version
+- **Attachments**: Screenshots, videos, logs, network traces
+- **Severity**: Critical/High/Medium/Low
+- **Priority**: Immediate/High/Medium/Low
+- **Status**: New/In Progress/Resolved/Verified/Closed
+- **Reproducibility**: Always/Intermittent/Unverified
+
+### Example
+
+```
+Title: Login fails when password contains special characters
+
+Description: When attempting to log in with a password containing certain special characters (!@#$%), the authentication request fails with a 500 error. This occurs consistently for passwords with these characters, but works fine with alphanumeric passwords.
+
+Steps to Reproduce:
+1. Navigate to login page
+2. Enter valid email address
+3. Enter password with special characters (e.g., P@ssw0rd!)
+4. Click 'Sign In' button
+
+Expected Result: User should be authenticated and redirected to dashboard
+Actual Result: Login button spins briefly, then error message "Authentication failed" appears
+
+Environment: Chrome 128.0.6613.138, macOS Sonoma 14.6, Application v2.4.1
+
+Attachments: login-error-screenshot.png, network-trace.har
+
+Severity: High
+Priority: High
+Status: New
+Reproducibility: Always
+```
+
+## 6. Test Metrics and KPIs
+
+### Key Quality Metrics
+
+| Metric | Calculation | Target | Reporting Frequency |
+|--------|-------------|--------|---------------------|
+| Test Coverage | (Tested Requirements / Total Requirements) × 100 | ≥80% | Daily |
+| Defect Density | Defects / KLOC (thousand lines of code) | ≤1.0 | Per Release |
+| Defect Escape Rate | Production Defects / (Total Defects + Production Defects) × 100 | ≤5% | Monthly |
+| Test Execution Pass Rate | Passed Tests / Executed Tests × 100 | ≥95% | Daily |
+| Test Automation Percentage | Automated Tests / Total Tests × 100 | ≥70% | Weekly |
+| Mean Time to Detect (MTTD) | Average time from defect introduction to discovery | ≤24 hours | Weekly |
+| Mean Time to Repair (MTTR) | Average time from defect detection to resolution | ≤4 hours | Weekly |
+
+
+### Quality Dashboard Elements
+
+A comprehensive quality dashboard should include:
+- Current test execution status
+- Trend charts for key metrics over time
+- Defect distribution by module and severity
+- Test coverage by component
+- Automated vs. manual test ratio
+- Build stability and deployment frequency
+- Customer-reported issues
+- Performance metrics
+
+## 7. Documentation Maintenance
+
+### Review Cadence
+- Test cases: Quarterly review and update
+- Test scripts: Update when related functionality changes
+- Test environment documentation: Update after any configuration changes
+- Quality reports: Generated automatically as part of CI/CD pipeline
+
+### Ownership
+- Test cases: QA Engineer responsible for the feature area
+- Test scripts: Developer and QA shared ownership
+- Test environments: DevOps and QA shared ownership
+- Quality metrics: QA Lead
+
+### Version Control
+All test documentation must be:
+- Stored in version control alongside code
+- Linked to requirements and user stories
+- Updated when related functionality changes
+- Reviewed as part of pull request process
\ No newline at end of file
diff --git a/Claude/rules/web/accessibility-testing.md b/Claude/rules/web/accessibility-testing.md
new file mode 100644
index 0000000..b5f3310
--- /dev/null
+++ b/Claude/rules/web/accessibility-testing.md
@@ -0,0 +1,244 @@
+---
+name: web-accessibility-testing
+description: >-
+ Web accessibility rules and guidelines to ensure inclusive experiences for all users.
+ Based on WCAG 2.2 AA compliance with practical implementation guidance.
+priority: CRITICAL
+---
+
+# Web Accessibility Rules
+
+> This file extends [common/accessibility.md](../common/accessibility.md) with web-specific content.
+
+## Core Principles
+
+### POUR Framework
+
+1. **Perceivable** - Information is presentable to users in ways they can perceive
+2. **Operable** - User interface components are operable
+3. **Understandable** - Information and operation are understandable
+4. **Robust** - Content is robust across current and future technologies
+
+### WCAG 2.2 AA Compliance
+
+Mandatory for all public-facing web applications. Must pass automated and manual testing.
+
+## Keyboard Navigation
+
+### Tab Order
+
+- Logical, meaningful sequence through content
+- Follow visual flow (left to right, top to bottom)
+- Skip links must be available for bypassing repetitive content
+- `tabindex="0"` for focusable elements, `tabindex="-1"` for programmatic focus only
+
+### Navigation Components
+
+```tsx
+// Skip navigation
+<a href="#main-content" className="skip-link">
+  Skip to main content
+</a>
+
+// Navigation with proper landmarks
+<nav aria-label="Primary">{/* navigation links */}</nav>
+```
+
+### Form Controls
+
+- All form fields must have associated labels
+- Use explicit label elements with `for` attributes
+- For visual-only labels, use `aria-label` or `aria-labelledby`
+- Group related controls with `fieldset` and `legend`
+
+```tsx
+// Proper form labeling
+<label htmlFor="email">Email address</label>
+<input id="email" type="email" name="email" />
+
+// Fieldset for radio groups
+<fieldset>
+  <legend>Notification preferences</legend>
+  {/* related radio inputs */}
+</fieldset>
+```
+
+## Screen Reader Compatibility
+
+### ARIA Implementation
+
+- Use semantic HTML first
+- Apply ARIA roles, states, and properties only when necessary
+- Ensure ARIA attributes are programmatically updated
+- Use `aria-live` regions for dynamic content
+
+```tsx
+// Live region for notifications
+<div aria-live="polite">
+  {notification && <p>{notification}</p>}
+</div>
+
+// Dynamic content updates
+<div role="status" aria-live="assertive">
+  {notification.message}
+</div>
+```
+
+### Landmark Regions
+
+- Use appropriate ARIA landmarks: banner, navigation, main, complementary, contentinfo
+- Ensure only one `role="main"` per page
+- Provide unique labels for landmark regions when multiple exist
+
+```tsx
+// HTML5 semantic elements with ARIA fallbacks
+<header role="banner">
+  <h1>Site Name</h1>
+  <nav role="navigation" aria-label="Primary">
+    {/* navigation links */}
+  </nav>
+</header>
+
+<main role="main" id="main-content">
+  {/* page content */}
+</main>
+
+<footer role="contentinfo">
+  {/* footer content */}
+</footer>
+```
+
+## Color and Contrast
+
+### Contrast Requirements
+
+- Text and images of text: minimum 4.5:1 contrast ratio
+- Large text (18pt+ or 14pt bold+): minimum 3:1 contrast ratio
+- Non-text contrast (UI components, charts): minimum 3:1 contrast ratio
+- Test in grayscale to ensure information isn't conveyed by color alone
+
+### Color Usage
+
+- Never use color as the only means of conveying information
+- Provide text labels or patterns as alternatives
+- Support both light and dark color schemes
+- Respect `prefers-color-scheme` media query
+
+```css
+/* Respecting color scheme preference */
+@media (prefers-color-scheme: dark) {
+ :root {
+ --surface-bg: #1a1a1a;
+ --text-primary: #ffffff;
+ }
+}
+
+/* High contrast mode support */
+@media (prefers-contrast: high) {
+ :root {
+ --border-thick: 2px;
+ }
+}
+```
+
+## Motion and Animation
+
+### Reduced Motion
+
+- Respect `prefers-reduced-motion` media query
+- Provide alternatives for motion-based interactions
+- Eliminate non-essential animations
+- Allow users to control animation duration
+
+```tsx
+// Conditional animation based on user preference
+const useReducedMotion = () => {
+ const [prefersReducedMotion, setPrefersReducedMotion] = useState(false);
+
+ useEffect(() => {
+ const mediaQuery = window.matchMedia('(prefers-reduced-motion: reduce)');
+ setPrefersReducedMotion(mediaQuery.matches);
+
+ const handleChange = (e) => setPrefersReducedMotion(e.matches);
+ mediaQuery.addEventListener('change', handleChange);
+
+ return () => mediaQuery.removeEventListener('change', handleChange);
+ }, []);
+
+ return prefersReducedMotion;
+};
+
+// Component using reduced motion preference
+const AnimatedSection = () => {
+ const prefersReducedMotion = useReducedMotion();
+
+  return (
+    <div className={prefersReducedMotion ? undefined : 'animated'}>
+      {/* content */}
+    </div>
+  );
+};
+```
+
+### Focus Indicators
+
+- Visible focus styles on all interactive elements
+- Do not remove `outline` property without providing equivalent visual indicator
+- Ensure focus indicators have sufficient contrast
+- Test keyboard navigation flow
+
+```css
+/* Preserve focus visibility */
+*:focus-visible {
+ outline: 2px solid var(--color-accent);
+ outline-offset: 2px;
+}
+
+/* Alternative focus style if outline is removed */
+button:focus {
+ outline: none;
+ box-shadow: 0 0 0 2px var(--color-accent);
+}
+```
+
+## Testing Checklist
+
+- [ ] All interactive elements are keyboard accessible
+- [ ] Tab order follows logical sequence
+- [ ] Skip links are present and functional
+- [ ] All form fields have associated labels
+- [ ] ARIA landmarks are properly implemented
+- [ ] Semantic HTML is used appropriately
+- [ ] Color contrast meets WCAG requirements
+- [ ] Information is not conveyed by color alone
+- [ ] `prefers-reduced-motion` is respected
+- [ ] Focus indicators are visible and clear
+- [ ] Screen reader testing performed with NVDA, VoiceOver
+- [ ] Keyboard-only testing completed
+- [ ] Automated accessibility scanning passed (axe, Lighthouse)
\ No newline at end of file
diff --git a/Claude/rules/web/test-pyramid.md b/Claude/rules/web/test-pyramid.md
new file mode 100644
index 0000000..dd1dee1
--- /dev/null
+++ b/Claude/rules/web/test-pyramid.md
@@ -0,0 +1,447 @@
+---
+name: web-test-pyramid
+description: >-
+ Web-specific test pyramid implementation strategy. Defines the optimal balance of
+ unit, integration, and end-to-end tests for web applications.
+priority: CRITICAL
+---
+
+# Test Pyramid for Web Applications
+
+> This file extends [common/test-pyramid.md](../common/test-pyramid.md) with web-specific guidelines.
+
+## Pyramid Structure
+
+```text
+ E2E Tests (10%) # Critical user flows, cross-system validation
+ │
+ │ Integration Tests (40%) # API endpoints, component interactions
+ │ │
+ │ │ Unit Tests (50%) # Components, utilities, hooks, functions
+ │ │ │
+ ▼ ▼ ▼
+ More Time ─────────── Less Time
+ Less Speed ────────── More Speed
+ Higher Cost ───────── Lower Cost
+```
+
+- **Unit Tests (50%)**: Fast, isolated, reliable tests for individual units
+- **Integration Tests (40%)**: Validate interactions between components and systems
+- **E2E Tests (10%)**: Real user scenarios, browser automation, slower execution
+
+## Test Distribution Strategy
+
+### Unit Tests (50%)
+
+Focus on:
+- React components and their behavior
+- Utility functions and data transformations
+- Custom hooks and their state management
+- Business logic and pure functions
+- Type validation and edge cases
+
+```tsx
+// Component unit test example
+import { render, screen, fireEvent } from '@testing-library/react';
+import { Button } from './Button';
+
+describe('Button', () => {
+ it('renders with correct text', () => {
+    render(<Button>Click me</Button>);
+ expect(screen.getByText('Click me')).toBeInTheDocument();
+ });
+
+ it('calls onClick handler when clicked', () => {
+ const handleClick = jest.fn();
+    render(<Button onClick={handleClick}>Click me</Button>);
+
+ fireEvent.click(screen.getByRole('button'));
+
+ expect(handleClick).toHaveBeenCalledTimes(1);
+ });
+
+ it('displays loading state correctly', () => {
+    render(<Button isLoading>Click me</Button>);
+ expect(screen.getByRole('button')).toBeDisabled();
+ expect(screen.getByText('Loading...')).toBeInTheDocument();
+ });
+});
+```
+
+```tsx
+// Custom hook test example
+import { renderHook, act } from '@testing-library/react-hooks';
+import { useCounter } from './useCounter';
+
+describe('useCounter', () => {
+ it('initializes with default value', () => {
+ const { result } = renderHook(() => useCounter());
+ expect(result.current.count).toBe(0);
+ });
+
+ it('increments count when increment is called', () => {
+ const { result } = renderHook(() => useCounter());
+
+ act(() => {
+ result.current.increment();
+ });
+
+ expect(result.current.count).toBe(1);
+ });
+});
+```
+
+### Integration Tests (40%)
+
+Focus on:
+- API endpoints and route handlers
+- Database interactions and queries
+- Third-party service integrations
+- Component composition and interactions
+- State management across components
+- Authentication and authorization flows
+
+```tsx
+// API integration test example
+import { NextRequest } from 'next/server';
+import { GET } from '@/app/api/users/route';
+
+describe('GET /api/users', () => {
+ it('returns users successfully', async () => {
+ const request = new NextRequest('http://localhost/api/users');
+ const response = await GET(request);
+ const data = await response.json();
+
+ expect(response.status).toBe(200);
+ expect(data.success).toBe(true);
+ expect(Array.isArray(data.users)).toBe(true);
+ });
+
+ it('applies filters correctly', async () => {
+ const request = new NextRequest('http://localhost/api/users?role=admin');
+ const response = await GET(request);
+ const data = await response.json();
+
+ expect(response.status).toBe(200);
+ data.users.forEach(user => {
+ expect(user.role).toBe('admin');
+ });
+ });
+});
+```
+
+```tsx
+// Component integration test
+import { render, screen, waitFor } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
+import { UserForm } from '@/components/UserForm';
+import { UserList } from '@/components/UserList';
+
+describe('UserForm and UserList integration', () => {
+ it('adds new user to list when form is submitted', async () => {
+ const user = userEvent.setup();
+
+    render(
+      <>
+        <UserForm />
+        <UserList />
+      </>
+    );
+
+ // Fill and submit form
+ await user.type(screen.getByLabelText(/name/i), 'John Doe');
+ await user.type(screen.getByLabelText(/email/i), 'john@example.com');
+ await user.click(screen.getByRole('button', { name: /submit/i }));
+
+ // Verify user appears in list
+ await waitFor(() => {
+ expect(screen.getByText('John Doe')).toBeInTheDocument();
+ });
+ });
+});
+```
+
+### End-to-End Tests (10%)
+
+Focus on:
+- Critical user journeys (login, signup, checkout, etc.)
+- Cross-browser compatibility
+- Realistic user interactions
+- Performance and loading behavior
+- Accessibility requirements
+
+```tsx
+// E2E test with Playwright
+import { test, expect } from '@playwright/test';
+
+test('user can complete purchase flow', async ({ page }) => {
+ // Navigate to home
+ await page.goto('/');
+
+ // Add item to cart
+ await page.click('text=Add to Cart');
+ await page.click('text=View Cart');
+
+ // Verify cart contents
+ await expect(page.locator('.cart-item')).toHaveCount(1);
+
+ // Proceed to checkout
+ await page.click('text=Checkout');
+
+ // Fill shipping information
+ await page.fill('input[name="email"]', 'test@example.com');
+ await page.fill('input[name="name"]', 'Test User');
+ await page.fill('input[name="address"]', '123 Main St');
+ await page.fill('input[name="city"]', 'Anytown');
+
+ // Submit order
+ await page.click('text=Place Order');
+
+ // Verify success
+ await expect(page.locator('text=Order confirmed')).toBeVisible();
+ await expect(page).toHaveURL(/confirmation/);
+});
+```
+
+```tsx
+// Login flow E2E test
+test('user can login and access dashboard', async ({ page }) => {
+ // Navigate to login
+ await page.goto('/login');
+
+ // Fill login form
+ await page.fill('input[name="email"]', 'test@example.com');
+ await page.fill('input[name="password"]', 'password123');
+ await page.click('text=Log In');
+
+ // Verify redirect to dashboard
+ await expect(page).toHaveURL('/dashboard');
+
+ // Verify dashboard elements
+ await expect(page.locator('h1')).toContainText('Dashboard');
+ await expect(page.getByRole('navigation')).toBeVisible();
+});
+```
+
+## Testing Guidelines
+
+### Test Selection Patterns
+
+| Scenario | Test Type |
+|---------|-----------|
+| Component UI and interactions | Unit test |
+| Data fetching and caching | Integration test |
+| Business logic and validation | Unit test |
+| API endpoint behavior | Integration test |
+| Third-party service integration | Integration test |
+| End-to-end user scenarios | E2E test |
+| Cross-browser compatibility | E2E test |
+| Performance benchmarks | E2E test |
+| Accessibility compliance | E2E test |
+| Complex state management | Integration test |
+
+### Mocking Strategy
+
+#### When to Mock
+
+- External APIs and third-party services
+- Authentication providers
+- Payment processors
+- Analytics and tracking scripts
+- Email and notification services
+- Background jobs and queues
+
+#### When NOT to Mock
+
+- Application state management
+- Core business logic
+- Component interactions within the same feature
+- Local storage and cookies (test actual behavior)
+- Routing within the application
+
+#### Mocking Implementation
+
+```tsx
+// Mock external service
+type MockServiceResponse = {
+ data: any;
+ error: string | null;
+};
+
+const mockService = {
+ fetchData: jest.fn<() => Promise<MockServiceResponse>>(() =>
+ Promise.resolve({ data: [{ id: 1, name: 'Test' }], error: null })
+ ),
+ updateData: jest.fn(),
+ deleteData: jest.fn()
+};
+
+// In test setup
+beforeEach(() => {
+ jest.resetAllMocks();
+ // Set default mock behavior
+ mockService.fetchData.mockResolvedValue({
+ data: [{ id: 1, name: 'Test Item' }],
+ error: null
+ });
+});
+```
+
+```tsx
+// Mock API route handler
+jest.mock('@/lib/api-client', () => ({
+ fetchUsers: jest.fn(() => Promise.resolve([
+ { id: 1, name: 'John Doe', email: 'john@example.com' }
+ ])),
+ createUser: jest.fn()
+}));
+
+// Mock database
+jest.mock('@/lib/db', () => ({
+ query: jest.fn(),
+ connect: jest.fn()
+}));
+```
+
+## Testing Tools and Frameworks
+
+| Test Type | Recommended Tools |
+|-----------|-------------------|
+| Unit Tests | Jest, Vitest, React Testing Library |
+| Integration Tests | Jest, Vitest, Supertest, React Testing Library |
+| E2E Tests | Playwright, Cypress |
+| Visual Regression | Playwright, Percy, Chromatic |
+| Performance Tests | Lighthouse, WebPageTest |
+| Accessibility Tests | axe, Pa11y, Lighthouse |
+
+## Test Organization
+
+```
+src/
+├── components/
+│ ├── Button/
+│ │ ├── Button.tsx
+│ │ ├── Button.test.tsx # Unit test
+│ │ └── Button.stories.tsx
+│ └── Form/
+│ ├── Form.tsx
+│ ├── Form.test.tsx
+│ └── __tests__/
+│ ├── Form.integration.test.tsx # Integration test
+│ └── Form.validation.test.tsx # Unit test
+├── pages/
+│ ├── api/
+│ │ ├── users/
+│ │ │ ├── route.ts
+│ │ │ └── route.test.ts # Integration test
+│ │ └── products/
+│ │ ├── route.ts
+│ │ └── route.test.ts
+│ └── __e2e__/
+│ ├── login.test.ts # E2E test
+│ └── checkout.test.ts # E2E test
+└── lib/
+ ├── api/
+ │ ├── client.ts
+ │ └── client.test.ts # Unit test
+ └── auth/
+ ├── index.ts
+ └── auth.test.ts # Unit test
+```
+
+## Performance Considerations
+
+- Unit tests should run in < 50ms each
+- Integration tests should run in < 500ms each
+- E2E tests should run in < 10s each (aim for < 5s)
+- Full test suite should complete in < 2 minutes
+- Use test parallelization when possible
+- Mock slow operations in unit and integration tests
+
+## Continuous Integration
+
+```yaml
+# GitHub Actions workflow
+name: Test Suite
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: 18
+
+ - name: Install dependencies
+ run: npm ci
+
+ - name: Run unit tests
+ run: npm run test:unit
+
+ - name: Run integration tests
+ run: npm run test:integration
+
+ - name: Run E2E tests
+ run: npm run test:e2e
+
+ - name: Generate coverage report
+ run: npm run test:coverage
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+```
+
+## Testing Checklist
+
+### Unit Tests
+
+- [ ] Test individual components in isolation
+- [ ] Verify component props and state
+- [ ] Test user interactions (clicks, input, etc.)
+- [ ] Verify appropriate ARIA attributes
+- [ ] Test loading and error states
+- [ ] Use appropriate testing library utilities
+- [ ] Mock external dependencies
+- [ ] Keep tests focused and fast
+- [ ] Test edge cases and error handling
+
+### Integration Tests
+
+- [ ] Test API endpoints and route handlers
+- [ ] Verify database interactions
+- [ ] Test component composition
+- [ ] Validate state management
+- [ ] Test authentication flows
+- [ ] Mock external services
+- [ ] Test error handling and edge cases
+- [ ] Verify data transformations
+- [ ] Test caching behavior
+- [ ] Validate security measures
+
+### E2E Tests
+
+- [ ] Test critical user journeys
+- [ ] Verify navigation and routing
+- [ ] Test form submissions
+- [ ] Validate data persistence
+- [ ] Test error scenarios
+- [ ] Verify loading states
+- [ ] Test accessibility requirements
+- [ ] Validate performance metrics
+- [ ] Test cross-browser compatibility
+- [ ] Include visual regression checks
+
+## Success Metrics
+
+- Test suite runs in under 2 minutes
+- 80%+ code coverage achieved
+- No flaky tests (100% pass rate)
+- Critical user flows covered by E2E tests
+- Each bug fix includes a regression test
+- New features include appropriate test coverage
+- Test pyramid ratio maintained (50/40/10)
\ No newline at end of file
diff --git a/Claude/skills/exploratory-testing.md b/Claude/skills/exploratory-testing.md
new file mode 100644
index 0000000..1c677bc
--- /dev/null
+++ b/Claude/skills/exploratory-testing.md
@@ -0,0 +1,265 @@
+---
+name: exploratory-testing
+trigger: "use PROACTIVELY when conducting QA, debugging, or user testing"
+description: Comprehensive guide to exploratory testing methodologies, charter development, session reporting, and best practices.
+---
+
+# Exploratory Testing Mastery
+
+Practical framework for structured exploratory testing that balances freedom with accountability. Based on ISTQB best practices and real-world application.
+
+## 1. Exploratory Testing Charters
+
+A charter defines the mission and scope for a testing session. It should be specific enough to provide direction, but flexible enough to allow discovery.
+
+### Charter Template
+
+```
+**Mission**: [What are you trying to achieve?]
+**Scope**: [Specific features/components to explore]
+**Risks**: [Known risk areas to investigate]
+**Timebox**: [Duration of session, typically 60-90 minutes]
+**Reference Materials**: [PRs, requirements, user stories to review]
+**Success Metrics**: [What does a successful session look like?]
+```
+
+### Example Charters
+
+**Authentication Flow Exploration**
+```
+Mission: Uncover edge cases in the multi-factor authentication flow
+Scope: Login page, 2FA entry, recovery codes, biometric authentication
+Risks: Session hijacking, brute force attacks, accessibility issues
+Timebox: 75 minutes
+Reference Materials: PR #1423, Security RFC-2024-001, User Story AUTH-45
+Success Metrics: Discover at least 3 edge cases, document 2 potential security improvements
+```
+
+**Checkout Process Resilience**
+```
+Mission: Test system behavior under failure conditions during checkout
+Scope: Payment processing, order confirmation, email notifications
+Risks: Payment gateway timeouts, inventory race conditions, network interruptions
+Timebox: 90 minutes
+Reference Materials: Payment Integration Doc v2.3, Order Service Architecture
+Success Metrics: Identify failure recovery gaps, suggest 3 resilience improvements
+```
+
+## 2. Test Session Structure
+
+Follow a disciplined approach to ensure productive sessions:
+
+### Pre-Session Preparation
+- Review charter and reference materials
+- Set up test environment and data
+- Configure necessary tools (browser dev tools, network throttling, etc.)
+- Document starting state
+
+### Session Execution (90-Minute Format)
+```
+0-15 min: Familiarization & hypothesis generation
+15-60 min: Deep exploration with varied inputs and sequences
+60-75 min: Focus on highest-risk areas identified
+75-90 min: Wrap-up and initial findings documentation
+```
+
+### Heuristic Test Strategy Model (HTSM)
+
+Use these dimensions to guide exploration:
+
+**FUNCTIONS**: What does it do? What doesn't it do?
+**DATA**: Valid/invalid inputs, boundary conditions, data dependencies
+**PLATFORMS**: Different browsers, devices, operating systems
+**OPERATIONS**: Typical user workflows, edge case scenarios
+**TIME**: Performance under load, timing dependencies, timeouts
+
+## 3. Session Reporting
+
+Document findings in a structured format that enables action:
+
+### Minimal Session Report
+```
+# Exploratory Testing Session Report
+
+**Date**: YYYY-MM-DD
+**Tester**: [Name]
+**Charter**: [Mission]
+**Duration**: [Start Time] - [End Time] ([Total Minutes] min)
+
+## Summary
+[2-3 sentence overview of key findings]
+
+## Areas Explored
+- [Feature/Component 1]
+- [Feature/Component 2]
+- [Feature/Component 3]
+
+## Findings
+
+### Critical Issues
+- [Issue 1: Brief description with reproduction steps]
+- [Issue 2: Brief description with reproduction steps]
+
+### Improvement Opportunities
+- [Suggestion 1: Clear, actionable recommendation]
+- [Suggestion 2: Clear, actionable recommendation]
+
+### Questions for Team
+- [Question 1: Specific question requiring clarification]
+- [Question 2: Specific question about expected behavior]
+
+## Time Allocation
+- Familiarization: X minutes
+- Deep exploration: X minutes
+- Focus on risks: X minutes
+- Documentation: X minutes
+
+## Confidence Rating
+[High/Medium/Low] - [Brief justification]
+```
+
+### Example Session Report
+```
+# Exploratory Testing Session Report
+
+**Date**: 2025-03-15
+**Tester**: Jane Doe
+**Charter**: Authentication Flow Exploration
+**Duration**: 10:00 - 11:15 (75 min)
+
+## Summary
+Uncovered three edge cases in 2FA recovery flow and identified accessibility gaps in biometric authentication. Discovered potential session fixation vulnerability when switching authentication methods.
+
+## Areas Explored
+- Login with email/password
+- SMS-based 2FA
+- Authenticator app 2FA
+- Biometric authentication (Touch ID)
+- Recovery code generation and use
+- Authentication method switching
+
+## Findings
+
+### Critical Issues
+- Session not invalidated when switching from password to biometric authentication, creating session fixation risk
+- Recovery codes not properly rate-limited, enabling brute force attacks
+- Biometric fallback not available when Touch ID fails multiple times
+
+### Improvement Opportunities
+- Implement step-up authentication when switching authentication methods
+- Add progressive rate limiting for recovery code attempts
+- Provide alternative 2FA method when biometrics fail
+- Enhance error messages for failed biometric attempts
+
+### Questions for Team
+- What is the expected behavior when a user's biometric enrollment changes (e.g., new phone)?
+- Are there plans to support security keys as a 2FA option?
+
+## Time Allocation
+- Familiarization: 12 minutes
+- Deep exploration: 48 minutes
+- Focus on risks: 10 minutes
+- Documentation: 5 minutes
+
+## Confidence Rating
+Medium - Covered primary attack vectors but limited time to test edge network conditions
+```
+
+## 4. Exploratory Testing Techniques
+
+### Touring Heuristics
+Apply these mental models during exploration:
+
+**Bug Taxonomy Tour**: Systematically test for common bug patterns:
+- Input validation failures
+- Error handling gaps
+- State management issues
+- Race conditions
+- Security vulnerabilities
+- Accessibility barriers
+
+**Quality Attributes Tour**: Focus on non-functional aspects:
+- Performance under various loads
+- Usability across different user personas
+- Reliability during network interruptions
+- Security against common attack vectors
+- Compatibility across platforms
+
+**Data Life Cycle Tour**: Follow data from creation to deletion:
+- Data entry and validation
+- Data storage and encryption
+- Data processing and transformation
+- Data display and formatting
+- Data sharing and export
+- Data deletion and retention
+
+### Session-Based Test Management (SBTM)
+
+Track exploratory testing as a structured practice:
+
+```
+| Session | Charter | Tester | Date | Duration | Bugs Found | Status |
+|---------|---------|--------|------|----------|------------|--------|
+| 001 | Auth Flow Exploration | Jane Doe | 2025-03-15 | 75 min | 3 | Complete |
+| 002 | Checkout Resilience | John Smith | 2025-03-16 | 90 min | 5 | Complete |
+| 003 | Search Relevance | Jane Doe | 2025-03-17 | 60 min | 2 | In Progress |
+```
+
+## 5. Integrating with Agile Workflow
+
+### When to Use Exploratory Testing
+- After major feature implementation
+- Before production releases
+- In response to user-reported issues
+- Following security incidents
+- When automated tests fail to catch critical bugs
+
+### Complementary Practices
+- Pair testing with developers or product owners
+- Bug bash sessions before releases
+- Context-driven test tours for new team members
+- Exploratory testing workshops to share findings
+
+## 6. Advanced Techniques
+
+### Exploratory Automation
+Combine exploratory principles with automation:
+
+```javascript
+// Randomized input generator for form testing
+function generateRandomUser() {
+ return {
+ email: `${randomString(8)}@${randomDomain()}`,
+ password: generateStrongPassword(),
+ age: randomInt(13, 100),
+ phone: generatePhoneNumber()
+ };
+}
+
+// Property-based testing for API endpoints
+function testAPIEndpointProperty(endpoint, property, generator) {
+ const testCases = Array.from({length: 100}, () => generator());
+
+ testCases.forEach(testCase => {
+ const response = callAPI(endpoint, testCase);
+ assert(property(response, testCase),
+ `Property violation for input: ${JSON.stringify(testCase)}`);
+ });
+}
+```
+
+### Cognitive Bias Mitigation
+
+Common biases and countermeasures:
+
+**Confirmation Bias**: Seek to disprove hypotheses, not confirm them
+**Automation Bias**: Don't trust automated results blindly; verify manually
+**Anchoring Bias**: Don't fixate on first observed behavior; explore alternatives
+**Availability Bias**: Don't focus only on obvious test areas; use checklists
+
+## 7. References
+
+- ISTQB Advanced Level Test Analyst Syllabus
+- Cem Kaner's "Exploratory Testing" concepts
+- James Bach's "Session-Based Test Management"
+- "Lessons Learned in Software Testing" by Cem Kaner, James Bach, and Bret Pettichord
\ No newline at end of file
diff --git a/Claude/skills/manual-testing.md b/Claude/skills/manual-testing.md
new file mode 100644
index 0000000..162e61e
--- /dev/null
+++ b/Claude/skills/manual-testing.md
@@ -0,0 +1,64 @@
+---
+name: manual-testing
+description: Manual testing strategies including test planning, case design, exploratory testing, bug reporting, and quality assurance processes.
+origin: ECC
+---
+
+# Manual Testing Guide
+
+Comprehensive manual testing strategies for ensuring software quality through systematic test planning, execution, and reporting.
+
+## Test Planning
+
+### Test Strategy Development
+
+1. **Define Objectives**:
+ - Identify key features to test
+ - Determine testing scope and boundaries
+ - Establish success criteria
+
+2. **Risk Assessment**:
+ - Identify high-risk areas (financial, security, core functionality)
+ - Prioritize testing based on risk level
+ - Allocate resources accordingly
+
+3. **Test Environment Setup**:
+ - Replicate production-like conditions
+ - Document environment configuration
+ - Ensure test data availability
+
+4. **Schedule and Resources**:
+ - Create realistic timelines
+ - Assign team members to specific areas
+ - Plan for contingencies
+
+### Test Charter (Session-Based Testing)
+
+```markdown
+# Test Charter: User Authentication
+
+**Objective**: Verify login functionality across all supported scenarios
+
+**Scope**:
+- Email/password login
+- Social login (Google, GitHub)
+- Password reset flows
+- Account lockout policies
+
+**Risks**:
+- Security vulnerabilities
+- Third-party service failures
+- Edge cases in validation
+
+**Data Requirements**:
+- Valid test accounts
+- Invalid credentials
+- Locked/blocked accounts
+
+**Tools**:
+- Browser developer tools
+- Network monitoring
+- Screenshot tool
+
+**Duration**: 90 minutes
+```
\ No newline at end of file
diff --git a/Claude/skills/quality-metrics.md b/Claude/skills/quality-metrics.md
new file mode 100644
index 0000000..6a247ee
--- /dev/null
+++ b/Claude/skills/quality-metrics.md
@@ -0,0 +1,1573 @@
+---
+type: skill
+name: quality-metrics
+description: Comprehensive framework for measuring, tracking, and improving software quality through actionable metrics, KPIs, and continuous improvement practices.
+origin: ECC
+---
+
+# Quality Metrics Framework
+
+Strategic approach to defining, collecting, and acting on software quality metrics that drive continuous improvement and business value.
+
+## Quality KPIs and Metrics
+
+### Defect Management Metrics
+
+```markdown
+# Defect Management Dashboard
+
+## Key Metrics
+
+| Metric | Formula | Target | Current | Status |
+|--------|---------|--------|---------|--------|
+| Defect Density | Total Defects / KLOC | ≤ 1.0 | 0.8 | ✅ |
+| Critical Defect Resolution Time | Avg. time to resolve P0/P1 defects | ≤ 4h | 3.2h | ✅ |
+| Defect Reopen Rate | Reopened Defects / Total Resolved | ≤ 5% | 3.8% | ✅ |
+| Test Escape Rate | Production Defects / Total Defects | ≤ 10% | 8.5% | ✅ |
+| Defect Turnaround Time | Avg. time from report to resolution | ≤ 24h | 18.5h | ✅ |
+
+## Defect Trends (Last 8 Weeks)
+
+```mermaid
+xychart-beta
+ title "Defect Trends"
+ x-axis [W1, W2, W3, W4, W5, W6, W7, W8]
+ y-axis "Count"
+ %% series: Reported, Resolved
+ line [45, 38, 42, 36, 39, 33, 30, 28]
+ line [32, 41, 39, 40, 37, 35, 34, 31]
+```
+
+## Defect Distribution
+
+```mermaid
+pie
+ title Defect Priority Distribution
+ "P0: Critical" : 5
+ "P1: High" : 12
+ "P2: Medium" : 25
+ "P3: Low" : 18
+```
+
+```mermaid
+pie
+ title Defect Origin
+ "Frontend" : 24
+ "Backend" : 28
+ "Integration" : 12
+ "Infrastructure" : 6
+ "Documentation" : 10
+```
+
+## Action Items
+
+- Investigate increase in P1 defects from integration points
+- Review test coverage for backend components with highest defect density
+- Conduct root cause analysis for reopened defects
+```
+
+### Test Coverage Metrics
+
+```markdown
+# Test Coverage Dashboard
+
+## Coverage Summary
+
+| Component | Unit Coverage | Integration Coverage | E2E Coverage | Total Coverage |
+|-----------|---------------|----------------------|--------------|----------------|
+| Authentication | 98% | 95% | 100% | 97.7% |
+| Payment Processing | 95% | 92% | 100% | 95.7% |
+| User Management | 92% | 88% | 95% | 91.7% |
+| Reporting | 85% | 80% | 90% | 85.0% |
+| API Gateway | 90% | 85% | 95% | 90.0% |
+| Overall | 92% | 88% | 96% | 92.0% |
+
+## Coverage Trends (Last 6 Months)
+
+```mermaid
+xychart-beta
+ title "Test Coverage Trends"
+ x-axis [M1, M2, M3, M4, M5, M6]
+ y-axis "Percentage" 80 --> 100
+ %% series: Unit, Integration, E2E, Overall
+ line [88, 89, 90, 91, 91.5, 92]
+ line [82, 83, 84, 86, 87, 88]
+ line [90, 91, 92, 93, 94, 96]
+ line [86.7, 87.7, 88.7, 89.7, 90.2, 92.0]
+```
+
+## Gaps Analysis
+
+### Low Coverage Areas
+
+1. **Error Handling Paths** (Current: 65%)
+ - Missing negative test cases for edge conditions
+ - Insufficient validation of malformed inputs
+ - Limited testing of third-party service failures
+
+2. **Concurrent Operations** (Current: 70%)
+ - Race condition scenarios not adequately tested
+ - Limited stress testing for high-concurrency scenarios
+ - Incomplete testing of distributed locking mechanisms
+
+3. **Security Controls** (Current: 75%)
+ - Authentication bypass scenarios missing
+ - Insufficient authorization testing for role transitions
+ - Limited testing of CSRF and XSS protections
+
+## Action Plan
+
+| Initiative | Owner | Target Date | Status |
+|-----------|-------|-------------|--------|
+| Implement mutation testing | QA Lead | 2025-06-30 | In Progress |
+| Expand negative test suite | Test Engineer | 2025-05-15 | Planned |
+| Add concurrency stress tests | SDET | 2025-07-31 | Research |
+| Enhance security test coverage | Security Engineer | 2025-06-15 | Planned |
+```
+
+### Release Quality Metrics
+
+```markdown
+# Release Quality Dashboard
+
+## Release Metrics
+
+| Release | Version | UAT Defects | P0/P1 in Prod | Rollback | Business Impact | Quality Score |
+|---------|---------|-------------|---------------|----------|----------------|---------------|
+| 2025-03-15 | 2.3.1 | 5 | 0 | No | Minimal | 9.2/10 |
+| 2025-02-28 | 2.3.0 | 12 | 1 | No | Low | 8.1/10 |
+| 2025-02-10 | 2.2.5 | 8 | 2 | Yes | Medium | 6.5/10 |
+| 2025-01-25 | 2.2.4 | 6 | 0 | No | Minimal | 9.0/10 |
+| 2025-01-05 | 2.2.3 | 15 | 3 | Yes | High | 5.8/10 |
+
+## Deployment Success Rate
+
+```mermaid
+xychart-beta
+ title "Deployment Success Rate"
+ x-axis [W1, W2, W3, W4, W5, W6, W7, W8]
+ y-axis "Percentage" 0 --> 100
+ %% series: Success Rate
+ line [100, 100, 85, 100, 70, 100, 100, 100]
+```
+
+## Post-Deployment Monitoring
+
+### Performance Impact
+
+| Metric | Pre-Release | Post-Release | Delta | Status |
+|--------|-------------|--------------|-------|--------|
+| API Response Time | 215ms | 228ms | +13ms | ⚠️ |
+| Error Rate | 0.45% | 0.52% | +0.07% | ⚠️ |
+| Throughput | 1,240 req/s | 1,180 req/s | -60 req/s | ⚠️ |
+| Cache Hit Rate | 92.3% | 91.8% | -0.5% | ✅ |
+
+### Business Impact
+
+```mermaid
+pie
+ title User Feedback
+ "Positive" : 45
+ "Neutral" : 30
+ "Negative" : 25
+```
+
+```mermaid
+pie
+ title Issue Categories
+ "UI/UX" : 40
+ "Performance" : 25
+ "Missing Features" : 20
+ "Bugs" : 15
+```
+
+## Release Process Improvements
+
+1. **Enhanced Pre-Release Checklist**
+ - Add performance regression gates
+ - Implement security scan requirements
+ - Require test coverage minimums
+
+2. **Improved Rollback Procedures**
+ - Automate rollback triggers based on health checks
+ - Implement blue-green deployment strategy
+ - Add database migration rollback plans
+
+3. **Post-Mortem Process**
+ - Conduct blameless post-mortems for failed releases
+ - Track action items to resolution
+ - Share learnings across teams
+```
+
+## Metrics Collection Framework
+
+### Automated Metrics Pipeline
+
+```yaml
+# .github/workflows/quality-metrics.yml
+name: Quality Metrics Collection
+
+on:
+ schedule:
+ - cron: '0 2 * * 1' # Run every Monday at 2 AM
+ workflow_dispatch:
+
+jobs:
+ collect-metrics:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '18'
+ cache: 'npm'
+
+ - name: Install dependencies
+ run: npm ci
+
+ - name: Extract test results
+ run: |
+ # Collect JUnit test results
+ mkdir -p metrics/test-results
+ find . -name "TEST-*.xml" -exec cp {} metrics/test-results/ \; 2>/dev/null || true
+
+ # Generate coverage report
+ npm run test:coverage -- --output-file=metrics/coverage.json
+
+ - name: Run static analysis
+ run: |
+ # Run ESLint and generate report
+ npx eslint . --format=json --output-file=metrics/eslint.json
+
+ # Run SonarQube scanner
+ npx sonarqube-scanner
+
+ - name: Collect CI/CD metrics
+ run: |
+ # Get recent pipeline data
+ gh api \
+ -X GET \
+ -H "Accept: application/vnd.github.v3+json" \
+ "/repos/${{ github.repository }}/actions/runs?per_page=100" \
+ > metrics/pipeline_runs.json
+
+ # Get deployment data
+ gh api \
+ -X GET \
+ -H "Accept: application/vnd.github.v3+json" \
+ "/repos/${{ github.repository }}/deployments?per_page=100" \
+ > metrics/deployments.json
+
+ - name: Generate quality dashboard
+ run: |
+ # Run metrics processing script
+ node scripts/generate-quality-dashboard.js
+
+ # Update README with latest metrics
+ node scripts/update-readme-metrics.js
+
+ - name: Commit and push updates
+ run: |
+ git config user.name 'github-actions'
+ git config user.email 'github-actions@github.com'
+
+ # Only commit if there are changes
+ if [ -n "$(git status --porcelain)" ]; then
+ git add .
+ git commit -m "[automated] Update quality metrics $(date +%Y-%m-%d)"
+ git push
+ fi
+```
+
+### Metrics Processing Script
+
+```javascript
+// scripts/generate-quality-dashboard.js
+const fs = require('fs');
+const path = require('path');
+const axios = require('axios');
+const jsYaml = require('js-yaml');
+
+// Configuration
+const CONFIG = {
+ metricsPath: './metrics',
+ outputPath: './docs/dashboards',
+ services: {
+ sonarqube: 'https://sonarqube.example.com',
+ jenkins: 'https://jenkins.example.com',
+ jira: 'https://jira.example.com',
+ datadog: 'https://api.datadoghq.com'
+ }
+};
+
+class MetricsProcessor {
+ constructor() {
+ this.metrics = {
+ test: {},
+ coverage: {},
+ defects: {},
+ performance: {},
+ ci_cd: {}
+ };
+ }
+
+ async processAllMetrics() {
+ console.log('Starting metrics processing...');
+
+ try {
+ // Load raw data
+ await this.loadTestData();
+ await this.loadCoverageData();
+ await this.loadDefectData();
+ await this.loadPerformanceData();
+ await this.loadCiCdData();
+
+ // Calculate KPIs
+ this.calculateTestKpis();
+ this.calculateCoverageKpis();
+ this.calculateDefectKpis();
+ this.calculatePerformanceKpis();
+ this.calculateCiCdKpis();
+
+ // Generate dashboards
+ await this.generateDashboards();
+
+ // Save processed metrics
+ this.saveProcessedMetrics();
+
+ console.log('Metrics processing completed successfully');
+
+ } catch (error) {
+ console.error('Error processing metrics:', error);
+ throw error;
+ }
+ }
+
+ async loadTestData() {
+ console.log('Loading test data...');
+
+ const testResultsPath = path.join(CONFIG.metricsPath, 'test-results');
+ const testFiles = fs.readdirSync(testResultsPath);
+
+ const testResults = [];
+ for (const file of testFiles) {
+ if (file.endsWith('.xml')) {
+ const content = fs.readFileSync(path.join(testResultsPath, file), 'utf8');
+ const parsed = this.parseJUnitXml(content);
+ testResults.push(...parsed.testsuites[0].testcase);
+ }
+ }
+
+ this.metrics.test.raw = testResults;
+ }
+
+ async loadCoverageData() {
+ console.log('Loading coverage data...');
+
+ const coveragePath = path.join(CONFIG.metricsPath, 'coverage.json');
+ if (fs.existsSync(coveragePath)) {
+ const coverageData = JSON.parse(fs.readFileSync(coveragePath, 'utf8'));
+ this.metrics.coverage.raw = coverageData;
+ }
+ }
+
+ async loadDefectData() {
+ console.log('Loading defect data...');
+
+ // Fetch from Jira or other issue tracking system
+ try {
+ const response = await axios.get(
+ `${CONFIG.services.jira}/rest/api/3/search`,
+ {
+ params: {
+ jql: 'project=QUALITY AND created >= -8w ORDER BY created DESC',
+ fields: 'summary,status,priority,created,updated'
+ },
+ headers: {
+ 'Authorization': `Basic ${Buffer.from(process.env.JIRA_USER + ':' + process.env.JIRA_TOKEN).toString('base64')}`
+ }
+ }
+ );
+
+ this.metrics.defects.raw = response.data.issues;
+
+ } catch (error) {
+ console.warn('Could not fetch defect data:', error.message);
+ // Use fallback data
+ this.metrics.defects.raw = this.loadLocalDefectData();
+ }
+ }
+
+ loadLocalDefectData() {
+ // Load from local file if Jira is unavailable
+ const localPath = path.join(CONFIG.metricsPath, 'local-defects.json');
+ if (fs.existsSync(localPath)) {
+ return JSON.parse(fs.readFileSync(localPath, 'utf8'));
+ }
+ return [];
+ }
+
+ async loadPerformanceData() {
+ console.log('Loading performance data...');
+
+ // Fetch from monitoring system
+ try {
+ const [responseTime, errorRate, throughput] = await Promise.all([
+ axios.get(`${CONFIG.services.datadog}/api/v1/metrics/query`, {
+ params: {
+ query: 'avg:api.response.time{env:production}.as_rate()'
+ },
+ headers: {
+ 'DD-API-KEY': process.env.DATADOG_API_KEY
+ }
+ }),
+ axios.get(`${CONFIG.services.datadog}/api/v1/metrics/query`, {
+ params: {
+ query: 'avg:api.error.rate{env:production}.as_rate()'
+ },
+ headers: {
+ 'DD-API-KEY': process.env.DATADOG_API_KEY
+ }
+ }),
+ axios.get(`${CONFIG.services.datadog}/api/v1/metrics/query`, {
+ params: {
+ query: 'avg:api.throughput{env:production}.as_rate()'
+ },
+ headers: {
+ 'DD-API-KEY': process.env.DATADOG_API_KEY
+ }
+ })
+ ]);
+
+ this.metrics.performance.response_time = responseTime.data;
+ this.metrics.performance.error_rate = errorRate.data;
+ this.metrics.performance.throughput = throughput.data;
+
+ } catch (error) {
+ console.warn('Could not fetch performance data:', error.message);
+ }
+ }
+
+ async loadCiCdData() {
+ console.log('Loading CI/CD data...');
+
+ const pipelineRunsPath = path.join(CONFIG.metricsPath, 'pipeline_runs.json');
+ const deploymentsPath = path.join(CONFIG.metricsPath, 'deployments.json');
+
+ if (fs.existsSync(pipelineRunsPath)) {
+ this.metrics.ci_cd.pipeline_runs = JSON.parse(
+ fs.readFileSync(pipelineRunsPath, 'utf8')
+ );
+ }
+
+ if (fs.existsSync(deploymentsPath)) {
+ this.metrics.ci_cd.deployments = JSON.parse(
+ fs.readFileSync(deploymentsPath, 'utf8')
+ );
+ }
+ }
+
+ calculateTestKpis() {
+ console.log('Calculating test KPIs...');
+
+ const tests = this.metrics.test.raw;
+
+ this.metrics.test.kpis = {
+ total_tests: tests.length,
+ passed: tests.filter(t => !t.failure && !t.error).length,
+ failed: tests.filter(t => t.failure || t.error).length,
+ skipped: tests.filter(t => t.$.status === 'skipped').length,
+ pass_rate: 0,
+ average_duration: 0
+ };
+
+ // Calculate pass rate
+ this.metrics.test.kpis.pass_rate =
+ parseFloat(((this.metrics.test.kpis.passed / tests.length) * 100).toFixed(2));
+
+ // Calculate average duration
+ const durations = tests.map(t => parseFloat(t.$.time) || 0);
+ this.metrics.test.kpis.average_duration =
+ parseFloat((durations.reduce((a, b) => a + b, 0) / durations.length).toFixed(3));
+
+ // Failure trends
+ this.metrics.test.kpis.failure_trends = this.calculateTrendData(
+ tests, 'day', (t) => new Date(t.$.timestamp || t.$.time).toISOString().split('T')[0]
+ );
+ }
+
+ calculateCoverageKpis() {
+ console.log('Calculating coverage KPIs...');
+
+ const raw = this.metrics.coverage.raw;
+
+ if (raw && raw.total) {
+ const total = raw.total;
+
+ this.metrics.coverage.kpis = {
+ lines: {
+ covered: total.lines.covered,
+ total: total.lines.total,
+ percentage: parseFloat(((total.lines.covered / total.lines.total) * 100).toFixed(2))
+ },
+ statements: {
+ covered: total.statements.covered,
+ total: total.statements.total,
+ percentage: parseFloat(((total.statements.covered / total.statements.total) * 100).toFixed(2))
+ },
+ functions: {
+ covered: total.functions.covered,
+ total: total.functions.total,
+ percentage: parseFloat(((total.functions.covered / total.functions.total) * 100).toFixed(2))
+ },
+ branches: {
+ covered: total.branches.covered,
+ total: total.branches.total,
+ percentage: parseFloat(((total.branches.covered / total.branches.total) * 100).toFixed(2))
+ }
+ };
+ }
+ }
+
+ calculateDefectKpis() {
+ console.log('Calculating defect KPIs...');
+
+ const defects = this.metrics.defects.raw;
+ const now = new Date();
+
+ // Filter recent defects
+ const recentDefects = defects.filter(d =>
+ new Date(d.fields.created) > new Date(now.getTime() - 8 * 7 * 24 * 60 * 60 * 1000)
+ );
+
+ // Count by priority
+ const priorityCounts = this.countBy(recentDefects, d => d.fields.priority.name);
+
+ this.metrics.defects.kpis = {
+ total: recentDefects.length,
+ by_priority: priorityCounts,
+ p0_p1_count: (priorityCounts['Critical'] || 0) + (priorityCounts['High'] || 0),
+ reopen_rate: 0, // Would need resolution history
+ average_resolution_time: 0, // Would need resolution dates
+ trends: this.calculateTrendData(
+ recentDefects, 'week', (d) => this.getWeekString(new Date(d.fields.created))
+ )
+ };
+ }
+
+ calculatePerformanceKpis() {
+ console.log('Calculating performance KPIs...');
+
+ const responseTime = this.metrics.performance.response_time;
+ const errorRate = this.metrics.performance.error_rate;
+ const throughput = this.metrics.performance.throughput;
+
+ if (responseTime && responseTime.series && responseTime.series[0]) {
+ const points = responseTime.series[0].pointlist;
+ const values = points.map(p => p[1]).filter(v => v !== null);
+
+ this.metrics.performance.kpis = {
+ response_time: {
+ average: parseFloat((values.reduce((a, b) => a + b, 0) / values.length).toFixed(2)),
+ p95: parseFloat(this.getPercentile(values, 95).toFixed(2)),
+ p99: parseFloat(this.getPercentile(values, 99).toFixed(2))
+ }
+ };
+ }
+ }
+
+ calculateCiCdKpis() {
+ console.log('Calculating CI/CD KPIs...');
+
+ const pipelineRuns = this.metrics.ci_cd.pipeline_runs?.workflow_runs || [];
+ const deployments = this.metrics.ci_cd.deployments || [];
+
+ // Filter recent pipeline runs
+ const recentRuns = pipelineRuns.filter(run =>
+ new Date(run.created_at) > new Date(Date.now() - 30 * 24 * 60 * 60 * 1000)
+ );
+
+ // Calculate success rate
+ const successfulRuns = recentRuns.filter(run => run.conclusion === 'success').length;
+ const totalRuns = recentRuns.length;
+
+ this.metrics.ci_cd.kpis = {
+ pipeline_success_rate: totalRuns > 0 ?
+ parseFloat(((successfulRuns / totalRuns) * 100).toFixed(2)) : 100,
+ average_duration: 0,
+ deployment_frequency: deployments.length / 4, // Per week
+ lead_time: this.calculateLeadTime(deployments),
+ rollback_rate: this.calculateRollbackRate(deployments)
+ };
+
+ // Average duration
+ if (recentRuns.length > 0) {
+ const durations = recentRuns.map(run => {
+ const createdAt = new Date(run.created_at);
+ const updatedAt = new Date(run.updated_at);
+ return (updatedAt - createdAt) / 1000; // seconds
+ });
+ this.metrics.ci_cd.kpis.average_duration =
+ parseFloat((durations.reduce((a, b) => a + b, 0) / durations.length / 60).toFixed(2)); // minutes
+ }
+ }
+
+ async generateDashboards() {
+ console.log('Generating dashboards...');
+
+ // Ensure output directory exists
+ if (!fs.existsSync(CONFIG.outputPath)) {
+ fs.mkdirSync(CONFIG.outputPath, { recursive: true });
+ }
+
+ // Generate Markdown dashboard
+ const markdownDashboard = this.generateMarkdownDashboard();
+ fs.writeFileSync(
+ path.join(CONFIG.outputPath, 'quality-dashboard.md'),
+ markdownDashboard
+ );
+
+ // Generate JSON dashboard for API consumption
+ const jsonDashboard = this.generateJsonDashboard();
+ fs.writeFileSync(
+ path.join(CONFIG.outputPath, 'quality-dashboard.json'),
+ JSON.stringify(jsonDashboard, null, 2)
+ );
+ }
+
+ generateMarkdownDashboard() {
+ return `# Quality Metrics Dashboard
+
+Last updated: ${new Date().toISOString()}
+
+## Test Execution
+
+- **Total Tests**: ${this.metrics.test.kpis.total_tests}
+- **Pass Rate**: ${this.metrics.test.kpis.pass_rate}%
+- **Failed Tests**: ${this.metrics.test.kpis.failed}
+- **Average Duration**: ${this.metrics.test.kpis.average_duration}s
+
+## Code Coverage
+
+| Metric | Covered | Total | Percentage |
+|--------|---------|-------|------------|
+| Lines | ${this.metrics.coverage.kpis?.lines?.covered || 0} | ${this.metrics.coverage.kpis?.lines?.total || 0} | ${this.metrics.coverage.kpis?.lines?.percentage || 0}% |
+| Statements | ${this.metrics.coverage.kpis?.statements?.covered || 0} | ${this.metrics.coverage.kpis?.statements?.total || 0} | ${this.metrics.coverage.kpis?.statements?.percentage || 0}% |
+| Functions | ${this.metrics.coverage.kpis?.functions?.covered || 0} | ${this.metrics.coverage.kpis?.functions?.total || 0} | ${this.metrics.coverage.kpis?.functions?.percentage || 0}% |
+| Branches | ${this.metrics.coverage.kpis?.branches?.covered || 0} | ${this.metrics.coverage.kpis?.branches?.total || 0} | ${this.metrics.coverage.kpis?.branches?.percentage || 0}% |
+
+## Defects (Last 8 Weeks)
+
+- **Total Defects**: ${this.metrics.defects.kpis.total}
+- **Critical/High Defects**: ${this.metrics.defects.kpis.p0_p1_count}
+- **Defect Trends**: See chart below
+
+
+e```mermaid
+lineChart
+ title Defect Trends
+ x-axis Week
+ y-axis Count
+ series Defects
+ Defects : ${this.metrics.defects.kpis.trends.map(t => t.count).join(', ')}
+```
+
+## CI/CD Metrics
+
+- **Pipeline Success Rate**: ${this.metrics.ci_cd.kpis.pipeline_success_rate}%
+- **Average Pipeline Duration**: ${this.metrics.ci_cd.kpis.average_duration} minutes
+- **Deployment Frequency**: ${this.metrics.ci_cd.kpis.deployment_frequency.toFixed(1)} per week
+- **Lead Time for Changes**: ${this.metrics.ci_cd.kpis.lead_time} days
+- **Rollback Rate**: ${this.metrics.ci_cd.kpis.rollback_rate}%
+
+## Performance (Production)
+
+- **Average Response Time**: ${this.metrics.performance.kpis?.response_time?.average || 'N/A'}ms
+- **P95 Response Time**: ${this.metrics.performance.kpis?.response_time?.p95 || 'N/A'}ms
+- **P99 Response Time**: ${this.metrics.performance.kpis?.response_time?.p99 || 'N/A'}ms
+`; }
+
+ generateJsonDashboard() {
+ return {
+ timestamp: new Date().toISOString(),
+ version: '1.0.0',
+ metrics: {
+ test: this.metrics.test.kpis,
+ coverage: this.metrics.coverage.kpis,
+ defects: this.metrics.defects.kpis,
+ ci_cd: this.metrics.ci_cd.kpis,
+ performance: this.metrics.performance.kpis
+ }
+ };
+ }
+
+ saveProcessedMetrics() {
+ console.log('Saving processed metrics...');
+
+ const outputPath = path.join(CONFIG.metricsPath, 'processed');
+ if (!fs.existsSync(outputPath)) {
+ fs.mkdirSync(outputPath, { recursive: true });
+ }
+
+ fs.writeFileSync(
+ path.join(outputPath, 'metrics-summary.json'),
+ JSON.stringify(this.metrics, null, 2)
+ );
+ }
+
+ // Helper methods
+ parseJUnitXml(xml) {
+ // Simplified XML parsing (in practice, would use xml2js or similar)
+ // This is a placeholder - actual implementation would use proper XML parser
+ return {
+ testsuites: [{
+ testcase: []
+ }]
+ };
+ }
+
+ countBy(array, fn) {
+ return array.reduce((acc, item) => {
+ const key = fn(item);
+ acc[key] = (acc[key] || 0) + 1;
+ return acc;
+ }, {});
+ }
+
+ calculateTrendData(items, period, dateExtractor) {
+ const trends = {};
+
+ for (const item of items) {
+ const dateStr = dateExtractor(item);
+ trends[dateStr] = (trends[dateStr] || 0) + 1;
+ }
+
+ // Convert to array and sort
+ return Object.entries(trends)
+ .map(([period, count]) => ({ period, count }))
+ .sort((a, b) => a.period.localeCompare(b.period));
+ }
+
+ getWeekString(date) {
+ const year = date.getFullYear();
+ const week = this.getWeekNumber(date);
+ return \
+{year}-W\
+{week.toString().padStart(2, '0')}\
+`;
+ }
+
+ getWeekNumber(date) {
+ const d = new Date(date);
+ d.setHours(0, 0, 0, 0);
+ d.setDate(d.getDate() + 4 - (d.getDay() || 7));
+ const yearStart = new Date(d.getFullYear(), 0, 1);
+ return Math.ceil((((d - yearStart) / 86400000) + 1) / 7);
+ }
+
+ getPercentile(values, percentile) {
+ const sorted = [...values].sort((a, b) => a - b);
+ const index = (percentile / 100) * (sorted.length - 1);
+ const floor = Math.floor(index);
+ const ceil = Math.ceil(index);
+
+ if (floor === ceil) {
+ return sorted[floor];
+ }
+
+ return sorted[floor] + (sorted[ceil] - sorted[floor]) * (index - floor);
+ }
+
+ calculateLeadTime(deployments) {
+ // Calculate average time from commit to deployment
+ if (deployments.length === 0) return 0;
+
+ // This is a simplified calculation - in practice would need commit timestamps
+ return 2; // Return 2 days as placeholder
+ }
+
+ calculateRollbackRate(deployments) {
+ // Calculate percentage of deployments that were rolled back
+ if (deployments.length === 0) return 0;
+
+ // This is a simplified calculation - in practice would need deployment status
+ return 5; // Return 5% as placeholder
+ }
+}
+
+// Run the processor
/**
 * Entry point: construct a processor and run the full metrics pipeline.
 */
async function main() {
  await new MetricsProcessor().processAllMetrics();
}

// Run only when invoked directly (`node <script>`), not when require()d.
if (require.main === module) {
  main().catch(console.error);
}

module.exports = MetricsProcessor;
+```
+
+### Dashboard Implementation
+
+```javascript
+// components/QualityDashboard.vue
+
+