diff --git a/desloppify/app/commands/review/batch/prompt_template.py b/desloppify/app/commands/review/batch/prompt_template.py
index e3b249f98..dcc847a2e 100644
--- a/desloppify/app/commands/review/batch/prompt_template.py
+++ b/desloppify/app/commands/review/batch/prompt_template.py
@@ -10,6 +10,8 @@
     HIGH_SCORE_ISSUES_NOTE_THRESHOLD,
 )
 
+from desloppify.intelligence.review.personas import PERSONAS, Persona, render_persona_block
+
 from ..prompt_sections import (
     PromptBatchContext,
     batch_dimension_prompts,
@@ -129,6 +131,17 @@ def _render_output_schema(context: PromptBatchContext, batch_index: int) -> str:
         + _render_context_update_example()
     )
 
+def _resolve_persona(name: str) -> Persona | None:
+    """Look up a Persona by name, or return None."""
+    if not name:
+        return None
+    name_lower = name.lower()
+    for persona in PERSONAS:
+        if persona.name.lower() == name_lower:
+            return persona
+    return None
+
+
 def render_batch_prompt(
     *,
     repo_root: Path,
@@ -141,6 +154,7 @@ def render_batch_prompt(
     context = build_batch_context(batch, batch_index)
     dim_prompts = context.dimension_prompts or batch_dimension_prompts(batch)
     dimension_contexts = batch.get("dimension_contexts") if isinstance(batch, dict) else None
+    persona = _resolve_persona(context.persona)
     return join_non_empty_sections(
         _render_metadata_block(
             repo_root=repo_root,
@@ -148,6 +162,7 @@ def render_batch_prompt(
             batch_index=batch_index,
             context=context,
         ),
+        render_persona_block(persona),
         render_dimension_prompts_block(context.dimensions, dim_prompts),
         policy_block,
         render_scoring_frame(),
diff --git a/desloppify/app/commands/review/prompt_sections.py b/desloppify/app/commands/review/prompt_sections.py
index cf9db2d55..f973f6b0e 100644
--- a/desloppify/app/commands/review/prompt_sections.py
+++ b/desloppify/app/commands/review/prompt_sections.py
@@ -16,6 +16,7 @@ class PromptBatchPayload(TypedDict, total=False):
     name: str
     dimensions: list[str]
    why: str
+    persona: str
     dimension_prompts: dict[str, dict[str, object]]
     judgment_finding_counts: dict[str, object]
     mechanical_finding_counts: dict[str, object]
@@ -31,6 +32,7 @@ class PromptBatchContext:
     rationale: str
     issues_cap: int
     dimension_prompts: dict[str, dict[str, object]]
+    persona: str
 
     @property
     def dimension_set(self) -> set[str]:
@@ -55,6 +57,7 @@ def build_batch_context(batch: PromptBatchPayload, batch_index: int) -> PromptBa
         rationale=str(batch.get("why", "")).strip(),
         issues_cap=max_batch_issues_for_dimension_count(len(dimensions)),
         dimension_prompts=batch_dimension_prompts(batch),
+        persona=str(batch.get("persona", "")).strip(),
     )
 
 
diff --git a/desloppify/data/global/CLAUDE.md b/desloppify/data/global/CLAUDE.md
index f9c283a78..c0828b12d 100644
--- a/desloppify/data/global/CLAUDE.md
+++ b/desloppify/data/global/CLAUDE.md
@@ -25,6 +25,7 @@
 Run `desloppify review --prepare` first to generate review data, then use Claude
 
 1. Each agent must be context-isolated — do not pass conversation history or score targets.
 2. Agents must consume `.desloppify/review_packet_blind.json` (not full `query.json`) to avoid score anchoring.
+3. Each batch is automatically assigned a reviewer persona (Pragmatist, Architect, Bug Hunter, or Migrator). The persona appears in the rendered prompt and biases attention without changing scoring rules. When launching subagents manually, you can reference the persona in the task description to reinforce the bias.
 
 ### Triage workflow
diff --git a/desloppify/data/global/SKILL.md b/desloppify/data/global/SKILL.md
index 4c426c1b6..33b5c11e5 100644
--- a/desloppify/data/global/SKILL.md
+++ b/desloppify/data/global/SKILL.md
@@ -141,6 +141,19 @@ Four paths to get subjective scores:
 
 **Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one.
 
+#### Persona rotation
+
+When multiple review batches run in parallel, each batch is automatically assigned a reviewer persona. Personas bias *attention*, not scoring rules: all findings still require the same confidence thresholds.
+
+| Persona | Bias | Key question |
+|---------|------|-------------|
+| Pragmatist | Simplicity over cleverness | Would a new team member understand this in 30 seconds? |
+| Architect | Boundaries, coupling, API surface | Does this respect the system's structural contracts? |
+| Bug Hunter | Null/undefined, races, missing awaits | What fails under edge cases? |
+| Migrator | Deprecated patterns, half-migrated code | What should have been cleaned up? |
+
+Personas cycle across batches automatically. A batch with the Architect persona spends more time exploring boundary violations, while a Bug Hunter batch focuses on edge-case failures. This improves coverage diversity across parallel reviews without changing the scoring rubric.
+
 #### Review output format
 
 Return machine-readable JSON for review imports. For `--external-submit`, include `session` from the generated template:
diff --git a/desloppify/intelligence/review/__init__.py b/desloppify/intelligence/review/__init__.py
index 546414884..7dc8c6946 100644
--- a/desloppify/intelligence/review/__init__.py
+++ b/desloppify/intelligence/review/__init__.py
@@ -55,6 +55,7 @@
     prepare_holistic_review,
     prepare_review,
 )
+from desloppify.intelligence.review.personas import PERSONAS, Persona, assign_personas
 from desloppify.intelligence.review.prepare_batches_builders import build_investigation_batches
 from desloppify.intelligence.review.remediation import generate_remediation_plan
 from desloppify.intelligence.review.selection import (
@@ -141,6 +142,10 @@ def import_holistic_issues(
     "prepare_review",
     "prepare_holistic_review",
     "build_investigation_batches",
+    # personas
+    "PERSONAS",
+    "Persona",
+    "assign_personas",
     # import
     "import_review_issues",
     "import_holistic_issues",
diff --git a/desloppify/intelligence/review/personas.py b/desloppify/intelligence/review/personas.py
new file mode 100644
index 000000000..9ba57367c
--- /dev/null
+++ b/desloppify/intelligence/review/personas.py
@@ -0,0 +1,82 @@
+"""Persona rotation for parallel review batches.
+
+When multiple review batches run in parallel, each batch can adopt a
+different reviewer persona. The persona biases *attention*, not scoring
+rules: all findings still require the same confidence thresholds.
+
+Personas improve coverage diversity -- a Bug Hunter notices edge-case
+races that an Architect overlooks, while the Architect catches boundary
+violations the Pragmatist skips.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True)
+class Persona:
+    """A reviewer persona that biases attention during batch review."""
+
+    name: str
+    bias: str
+    key_question: str
+
+
+PERSONAS: tuple[Persona, ...] = (
+    Persona(
+        name="Pragmatist",
+        bias="Simplicity over cleverness",
+        key_question="Would a new team member understand this in 30 seconds?",
+    ),
+    Persona(
+        name="Architect",
+        bias="Boundaries, coupling, API surface consistency, and layer discipline",
+        key_question="Does this respect the system's structural contracts?",
+    ),
+    Persona(
+        name="Bug Hunter",
+        bias="Null/undefined, race conditions, missing awaits, error swallowing, and edge cases",
+        key_question="What fails under edge cases or concurrent access?",
+    ),
+    Persona(
+        name="Migrator",
+        bias="Deprecated patterns, half-migrated code, stale shims, and dual-path confusion",
+        key_question="What should have been cleaned up already?",
+    ),
+)
+
+
+def assign_personas(batch_count: int) -> list[Persona | None]:
+    """Return a persona assignment for *batch_count* batches.
+
+    Cycles through the four personas. When there are more batches than
+    personas, the cycle repeats. Returns an empty list when
+    ``batch_count`` is zero or negative.
+    """
+    if batch_count <= 0:
+        return []
+    return [PERSONAS[i % len(PERSONAS)] for i in range(batch_count)]
+
+
+def render_persona_block(persona: Persona | None) -> str:
+    """Render a prompt section describing the active persona."""
+    if persona is None:
+        return ""
+    return (
+        f"REVIEWER PERSONA: {persona.name}\n"
+        f"Attention bias: {persona.bias}\n"
+        f"Key question: {persona.key_question}\n\n"
+        "The persona biases where you spend your attention, not the scoring "
+        "rules. All findings still require the standard confidence threshold. "
+        "You still report every issue you find, but you explore your "
+        "persona's domain more thoroughly than other areas.\n\n"
+    )
+
+
+__all__ = [
+    "PERSONAS",
+    "Persona",
+    "assign_personas",
+    "render_persona_block",
+]
diff --git a/desloppify/intelligence/review/prepare_batches_builders.py b/desloppify/intelligence/review/prepare_batches_builders.py
index f21f79280..2c991b783 100644
--- a/desloppify/intelligence/review/prepare_batches_builders.py
+++ b/desloppify/intelligence/review/prepare_batches_builders.py
@@ -4,6 +4,7 @@
 
 from pathlib import Path
 
+from .personas import assign_personas
 from .prepare_batches_collectors import _DIMENSION_FILE_MAPPING
 from .prepare_batches_core import (
     _ensure_holistic_context,
@@ -81,6 +82,11 @@ def build_investigation_batches(
 
         batches.append(batch)
 
+    personas = assign_personas(len(batches))
+    for batch, persona in zip(batches, personas):
+        if persona is not None:
+            batch["persona"] = persona.name
+
     return batches
 
 
diff --git a/docs/CLAUDE.md b/docs/CLAUDE.md
index f9c283a78..c0828b12d 100644
--- a/docs/CLAUDE.md
+++ b/docs/CLAUDE.md
@@ -25,6 +25,7 @@
 Run `desloppify review --prepare` first to generate review data, then use Claude
 
 1. Each agent must be context-isolated — do not pass conversation history or score targets.
 2. Agents must consume `.desloppify/review_packet_blind.json` (not full `query.json`) to avoid score anchoring.
+3. Each batch is automatically assigned a reviewer persona (Pragmatist, Architect, Bug Hunter, or Migrator). The persona appears in the rendered prompt and biases attention without changing scoring rules. When launching subagents manually, you can reference the persona in the task description to reinforce the bias.
 
 ### Triage workflow
diff --git a/docs/SKILL.md b/docs/SKILL.md
index 4c426c1b6..33b5c11e5 100644
--- a/docs/SKILL.md
+++ b/docs/SKILL.md
@@ -141,6 +141,19 @@ Four paths to get subjective scores:
 
 **Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one.
 
+#### Persona rotation
+
+When multiple review batches run in parallel, each batch is automatically assigned a reviewer persona. Personas bias *attention*, not scoring rules: all findings still require the same confidence thresholds.
+
+| Persona | Bias | Key question |
+|---------|------|-------------|
+| Pragmatist | Simplicity over cleverness | Would a new team member understand this in 30 seconds? |
+| Architect | Boundaries, coupling, API surface | Does this respect the system's structural contracts? |
+| Bug Hunter | Null/undefined, races, missing awaits | What fails under edge cases? |
+| Migrator | Deprecated patterns, half-migrated code | What should have been cleaned up? |
+
+Personas cycle across batches automatically. A batch with the Architect persona spends more time exploring boundary violations, while a Bug Hunter batch focuses on edge-case failures. This improves coverage diversity across parallel reviews without changing the scoring rubric.
+
 #### Review output format
 
 Return machine-readable JSON for review imports. For `--external-submit`, include `session` from the generated template:
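
For reference, a minimal sketch of how the new helpers compose (illustrative only, assuming the patched `desloppify` package is importable; the batch count of six is invented for the example):

    from desloppify.intelligence.review.personas import assign_personas, render_persona_block

    # Six parallel batches: the four personas cycle in order, so batches
    # five and six wrap around to Pragmatist and Architect again.
    personas = assign_personas(6)
    print([p.name for p in personas if p is not None])
    # ['Pragmatist', 'Architect', 'Bug Hunter', 'Migrator', 'Pragmatist', 'Architect']

    # Each batch's rendered prompt leads with its persona section;
    # render_persona_block(None) returns an empty string, so unassigned
    # batches simply omit the section.
    print(render_persona_block(personas[0]))

Because `build_investigation_batches` stores only the persona *name* on each batch dict, `_resolve_persona` in prompt_template.py re-derives the Persona object by case-insensitive name lookup at render time.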