15 changes: 15 additions & 0 deletions desloppify/app/commands/review/batch/prompt_template.py
@@ -10,6 +10,8 @@
HIGH_SCORE_ISSUES_NOTE_THRESHOLD,
)

from desloppify.intelligence.review.personas import PERSONAS, render_persona_block

from ..prompt_sections import (
PromptBatchContext,
batch_dimension_prompts,
@@ -129,6 +131,17 @@ def _render_output_schema(context: PromptBatchContext, batch_index: int) -> str:
+ _render_context_update_example()
)

def _resolve_persona(name: str):
"""Look up a Persona by name, or return None."""
if not name:
return None
name_lower = name.lower()
for persona in PERSONAS:
if persona.name.lower() == name_lower:
return persona
return None


def render_batch_prompt(
*,
repo_root: Path,
@@ -141,13 +154,15 @@ def render_batch_prompt(
context = build_batch_context(batch, batch_index)
dim_prompts = context.dimension_prompts or batch_dimension_prompts(batch)
dimension_contexts = batch.get("dimension_contexts") if isinstance(batch, dict) else None
persona = _resolve_persona(context.persona)
return join_non_empty_sections(
_render_metadata_block(
repo_root=repo_root,
packet_path=packet_path,
batch_index=batch_index,
context=context,
),
render_persona_block(persona),
render_dimension_prompts_block(context.dimensions, dim_prompts),
policy_block,
render_scoring_frame(),
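
A quick sketch of the resolver's behavior (hypothetical usage; `_resolve_persona` is module-private and imported here only for illustration):

```python
from desloppify.app.commands.review.batch.prompt_template import _resolve_persona

# Matching is case-insensitive, so a batch that stored "bug hunter" still resolves.
persona = _resolve_persona("bug hunter")
assert persona is not None and persona.name == "Bug Hunter"

# Empty or unknown names fall back to None; the prompt then omits the persona block.
assert _resolve_persona("") is None
assert _resolve_persona("Perfectionist") is None
```
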
3 changes: 3 additions & 0 deletions desloppify/app/commands/review/prompt_sections.py
@@ -16,6 +16,7 @@ class PromptBatchPayload(TypedDict, total=False):
name: str
dimensions: list[str]
why: str
persona: str
dimension_prompts: dict[str, dict[str, object]]
judgment_finding_counts: dict[str, object]
mechanical_finding_counts: dict[str, object]
@@ -31,6 +32,7 @@ class PromptBatchContext:
rationale: str
issues_cap: int
dimension_prompts: dict[str, dict[str, object]]
persona: str

@property
def dimension_set(self) -> set[str]:
@@ -55,6 +57,7 @@ def build_batch_context(batch: PromptBatchPayload, batch_index: int) -> PromptBatchContext:
rationale=str(batch.get("why", "")).strip(),
issues_cap=max_batch_issues_for_dimension_count(len(dimensions)),
dimension_prompts=batch_dimension_prompts(batch),
persona=str(batch.get("persona", "")).strip(),
)


1 change: 1 addition & 0 deletions desloppify/data/global/CLAUDE.md
@@ -25,6 +25,7 @@ Run `desloppify review --prepare` first to generate review data, then use Claude

1. Each agent must be context-isolated — do not pass conversation history or score targets.
2. Agents must consume `.desloppify/review_packet_blind.json` (not full `query.json`) to avoid score anchoring.
3. Each batch is automatically assigned a reviewer persona (Pragmatist, Architect, Bug Hunter, or Migrator). The persona appears in the rendered prompt and biases attention without changing scoring rules. When launching subagents manually, you can reference the persona in the task description to reinforce the bias.

### Triage workflow

13 changes: 13 additions & 0 deletions desloppify/data/global/SKILL.md
@@ -141,6 +141,19 @@ Four paths to get subjective scores:

**Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one.

#### Persona rotation

When multiple review batches run in parallel, each batch is automatically assigned a reviewer persona. Personas bias *attention*, not scoring rules: all findings still require the same confidence thresholds.

| Persona | Bias | Key question |
|---------|------|-------------|
| Pragmatist | Simplicity over cleverness | Would a new team member understand this in 30 seconds? |
| Architect | Boundaries, coupling, API surface | Does this respect the system's structural contracts? |
| Bug Hunter | Null/undefined, races, missing awaits | What fails under edge cases? |
| Migrator | Deprecated patterns, half-migrated code | What should have been cleaned up? |

Personas cycle across batches automatically. A batch with the Architect persona spends more time exploring boundary violations, while a Bug Hunter batch focuses on edge-case failures. This improves coverage diversity across parallel reviews without changing the scoring rubric.
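
A minimal sketch of the rotation, using the `assign_personas` helper this change adds:

```python
from desloppify.intelligence.review.personas import assign_personas

# Six parallel batches: the four personas cycle in order, then repeat.
names = [p.name for p in assign_personas(6) if p is not None]
# -> ["Pragmatist", "Architect", "Bug Hunter", "Migrator", "Pragmatist", "Architect"]
```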

#### Review output format

Return machine-readable JSON for review imports. For `--external-submit`, include `session` from the generated template:
5 changes: 5 additions & 0 deletions desloppify/intelligence/review/__init__.py
@@ -55,6 +55,7 @@
prepare_holistic_review,
prepare_review,
)
from desloppify.intelligence.review.personas import PERSONAS, Persona, assign_personas
from desloppify.intelligence.review.prepare_batches_builders import build_investigation_batches
from desloppify.intelligence.review.remediation import generate_remediation_plan
from desloppify.intelligence.review.selection import (
@@ -141,6 +142,10 @@ def import_holistic_issues(
"prepare_review",
"prepare_holistic_review",
"build_investigation_batches",
# personas
"PERSONAS",
"Persona",
"assign_personas",
# import
"import_review_issues",
"import_holistic_issues",
82 changes: 82 additions & 0 deletions desloppify/intelligence/review/personas.py
@@ -0,0 +1,82 @@
"""Persona rotation for parallel review batches.

When multiple review batches run in parallel, each batch can adopt a
different reviewer persona. The persona biases *attention*, not scoring
rules: all findings still require the same confidence thresholds.

Personas improve coverage diversity -- a Bug Hunter notices edge-case
races that an Architect overlooks, while the Architect catches boundary
violations the Pragmatist skips.
"""

from __future__ import annotations

from dataclasses import dataclass


@dataclass(frozen=True)
class Persona:
"""A reviewer persona that biases attention during batch review."""

name: str
bias: str
key_question: str


PERSONAS: tuple[Persona, ...] = (
Persona(
name="Pragmatist",
bias="Simplicity over cleverness",
key_question="Would a new team member understand this in 30 seconds?",
),
Persona(
name="Architect",
bias="Boundaries, coupling, API surface consistency, and layer discipline",
key_question="Does this respect the system's structural contracts?",
),
Persona(
name="Bug Hunter",
bias="Null/undefined, race conditions, missing awaits, error swallowing, and edge cases",
key_question="What fails under edge cases or concurrent access?",
),
Persona(
name="Migrator",
bias="Deprecated patterns, half-migrated code, stale shims, and dual-path confusion",
key_question="What should have been cleaned up already?",
),
)


def assign_personas(batch_count: int) -> list[Persona | None]:
"""Return a persona assignment for *batch_count* batches.

    Cycles through the four personas; when there are more batches than
    personas, the cycle repeats. A non-positive ``batch_count`` yields an
    empty list; the returned list itself never contains ``None`` (the
    ``Persona | None`` element type leaves room for callers that treat
    assignments as optional).
"""
if batch_count <= 0:
return []
return [PERSONAS[i % len(PERSONAS)] for i in range(batch_count)]


def render_persona_block(persona: Persona | None) -> str:
"""Render a prompt section describing the active persona."""
if persona is None:
return ""
return (
f"REVIEWER PERSONA: {persona.name}\n"
f"Attention bias: {persona.bias}\n"
f"Key question: {persona.key_question}\n\n"
"The persona biases where you spend your attention, not the scoring "
"rules. All findings still require the standard confidence threshold. "
"You still report every issue you find, but you explore your "
"persona's domain more thoroughly than other areas.\n\n"
)


__all__ = [
"PERSONAS",
"Persona",
"assign_personas",
"render_persona_block",
]
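
A short usage sketch for the module above (the commented output mirrors the string built by `render_persona_block`):

```python
from desloppify.intelligence.review.personas import PERSONAS, render_persona_block

# PERSONAS is ordered: Pragmatist, Architect, Bug Hunter, Migrator.
print(render_persona_block(PERSONAS[2]))
# REVIEWER PERSONA: Bug Hunter
# Attention bias: Null/undefined, race conditions, missing awaits, error swallowing, and edge cases
# Key question: What fails under edge cases or concurrent access?

# A batch with no persona renders an empty section, leaving the prompt unchanged.
assert render_persona_block(None) == ""
```
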
6 changes: 6 additions & 0 deletions desloppify/intelligence/review/prepare_batches_builders.py
@@ -4,6 +4,7 @@

from pathlib import Path

from .personas import assign_personas
from .prepare_batches_collectors import _DIMENSION_FILE_MAPPING
from .prepare_batches_core import (
_ensure_holistic_context,
@@ -81,6 +82,11 @@ def build_investigation_batches(

batches.append(batch)

personas = assign_personas(len(batches))
for batch, persona in zip(batches, personas):
if persona is not None:
batch["persona"] = persona.name

return batches


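A hedged sketch of what the loop above produces (the batch dicts are simplified; real batches carry more keys):

```python
from desloppify.intelligence.review.personas import assign_personas

batches = [{"name": "batch-0"}, {"name": "batch-1"}, {"name": "batch-2"}]
for batch, persona in zip(batches, assign_personas(len(batches))):
    if persona is not None:
        batch["persona"] = persona.name

# Only the persona *name* is stored, which keeps batch payloads JSON-friendly:
# [{"name": "batch-0", "persona": "Pragmatist"},
#  {"name": "batch-1", "persona": "Architect"},
#  {"name": "batch-2", "persona": "Bug Hunter"}]
```
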
1 change: 1 addition & 0 deletions docs/CLAUDE.md
@@ -25,6 +25,7 @@ Run `desloppify review --prepare` first to generate review data, then use Claude

1. Each agent must be context-isolated — do not pass conversation history or score targets.
2. Agents must consume `.desloppify/review_packet_blind.json` (not full `query.json`) to avoid score anchoring.
3. Each batch is automatically assigned a reviewer persona (Pragmatist, Architect, Bug Hunter, or Migrator). The persona appears in the rendered prompt and biases attention without changing scoring rules. When launching subagents manually, you can reference the persona in the task description to reinforce the bias.

### Triage workflow

Expand Down
13 changes: 13 additions & 0 deletions docs/SKILL.md
@@ -141,6 +141,19 @@ Four paths to get subjective scores:

**Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one.

#### Persona rotation

When multiple review batches run in parallel, each batch is automatically assigned a reviewer persona. Personas bias *attention*, not scoring rules: all findings still require the same confidence thresholds.

| Persona | Bias | Key question |
|---------|------|-------------|
| Pragmatist | Simplicity over cleverness | Would a new team member understand this in 30 seconds? |
| Architect | Boundaries, coupling, API surface | Does this respect the system's structural contracts? |
| Bug Hunter | Null/undefined, races, missing awaits | What fails under edge cases? |
| Migrator | Deprecated patterns, half-migrated code | What should have been cleaned up? |

Personas cycle across batches automatically. A batch with the Architect persona spends more time exploring boundary violations, while a Bug Hunter batch focuses on edge-case failures. This improves coverage diversity across parallel reviews without changing the scoring rubric.
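
For reference, the block a persona injects into the rendered prompt looks like this (text taken from the `render_persona_block` helper elsewhere in this diff; the Architect is shown):

```
REVIEWER PERSONA: Architect
Attention bias: Boundaries, coupling, API surface consistency, and layer discipline
Key question: Does this respect the system's structural contracts?
```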

#### Review output format

Return machine-readable JSON for review imports. For `--external-submit`, include `session` from the generated template: