Skip to content
Merged

Dev #19

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,19 +9,19 @@ resolver = "2"
[workspace.dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
candle-core = { version = "0.8" }
candle-nn = { version = "0.8" }
candle-transformers = { version = "0.8" }
candle-core = { version = "0.10.2" }
candle-nn = { version = "0.10.2" }
candle-transformers = { version = "0.10.2" }
tokenizers = { version = "0.21.0", default-features = false, features = ["fancy-regex"] }
rand = "0.9"
ratatui = "0.29"
crossterm = "0.28"
rand = "0.10.1"
ratatui = "0.30"
crossterm = "0.29"
tokio = { version = "1", features = ["full"] }
anyhow = "1"
clap = { version = "4", features = ["derive"] }
regex = "1"
reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "stream", "rustls-tls-native-roots"] }
indicatif = "0.17"
indicatif = "0.18.4"
walkdir = "2"
chrono = { version = "0.4", features = ["serde"] }
lazy_static = "1"
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Auto-detects your OS and CPU architecture (Intel or Apple Silicon), downloads th
Pin a specific version:

```bash
VERSION=v0.6.0 curl -fsSL https://github.com/thinkgrid-labs/diffmind/releases/latest/download/install.sh | bash
VERSION=vX.Y.Z curl -fsSL https://github.com/thinkgrid-labs/diffmind/releases/latest/download/install.sh | bash
```

### Windows
Expand Down
4 changes: 2 additions & 2 deletions apps/tui-cli/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "diffmind"
version = "0.6.2"
edition = "2021"
version = "0.6.3"
edition = "2024"
description = "Local-first AI code review agent — powered by on-device inference"

[[bin]]
Expand Down
6 changes: 6 additions & 0 deletions apps/tui-cli/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,12 @@ pub struct Cli {
#[arg(long)]
pub debug: bool,

/// Inference device: auto (default), cpu, metal.
/// `auto` tries Metal on Apple Silicon and falls back to CPU.
/// `metal` forces GPU inference (macOS only).
#[arg(long, default_value = "auto")]
pub device: String,

/// Specific files or directories to review (optional)
pub files: Vec<String>,
}
Expand Down
6 changes: 1 addition & 5 deletions apps/tui-cli/src/git.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,7 @@ pub fn current_branch() -> Option<String> {
if output.status.success() {
let branch = String::from_utf8_lossy(&output.stdout).trim().to_string();
// "HEAD" means detached state — not useful to show
if branch == "HEAD" {
None
} else {
Some(branch)
}
if branch == "HEAD" { None } else { Some(branch) }
} else {
None
}
Expand Down
207 changes: 128 additions & 79 deletions apps/tui-cli/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use anyhow::Result;
use core_engine::{ReviewAnalyzer, ReviewFinding, Severity};
use core_engine::{DevicePreference, ReviewAnalyzer, ReviewFinding, Severity};
use indicatif::{ProgressBar, ProgressStyle};
use std::{
collections::HashSet,
Expand Down Expand Up @@ -83,6 +83,16 @@ async fn main() -> Result<()> {
Ok(())
}

// ─── Device helpers ──────────────────────────────────────────────────────────

/// Translate the `--device` CLI flag into a `DevicePreference`.
///
/// Matching is case-insensitive. Recognized values are `"metal"` and
/// `"cpu"`; anything else (including the default `"auto"`) resolves to
/// `DevicePreference::Auto`, which tries Metal on Apple Silicon and
/// falls back to CPU.
fn parse_device(s: &str) -> DevicePreference {
    let normalized = s.to_lowercase();
    if normalized == "metal" {
        DevicePreference::Metal
    } else if normalized == "cpu" {
        DevicePreference::Cpu
    } else {
        DevicePreference::Auto
    }
}

// ─── Severity helpers ────────────────────────────────────────────────────────

fn parse_severity(s: &str) -> Severity {
Expand Down Expand Up @@ -262,14 +272,15 @@ async fn run_static(
// ── RAG context ───────────────────────────────────────────────────────────
let index = Indexer::load(project_root);
let mut context = String::new();
if let Some(idx) = index {
if let Some(rag_text) = rag::get_rag_context(diff, &idx) {
context = rag_text;
}
if let Some(idx) = index
&& let Some(rag_text) = rag::get_rag_context(diff, &idx)
{
context = rag_text;
}

// ── Build analyzer ────────────────────────────────────────────────────────
let mut analyzer = ReviewAnalyzer::new(&model_bytes, &tokenizer_bytes)
let device_pref = parse_device(&args.device);
let mut analyzer = ReviewAnalyzer::new_with_device(&model_bytes, &tokenizer_bytes, device_pref)
.map_err(|e| anyhow::anyhow!(e.to_string()))?
.with_languages(langs)
.with_debug(args.debug);
Expand Down Expand Up @@ -304,7 +315,7 @@ async fn run_static(
});

let pb = spinner.clone();
let (all_findings, skipped) = analyzer
let (summary, skipped) = analyzer
.analyze_diff_chunked_with_progress(diff, &context, args.max_tokens, move |done, total| {
*chunk_label.lock().unwrap() = format!("chunk {}/{}", done, total);
pb.set_message(format!("Analyzing chunk {}/{}...", done, total));
Expand All @@ -323,39 +334,47 @@ async fn run_static(
);
}

// ── Filter to threshold ───────────────────────────────────────────────────
let findings: Vec<&ReviewFinding> = all_findings
// ── Filter findings to threshold ──────────────────────────────────────────
let findings: Vec<&ReviewFinding> = summary
.findings
.iter()
.filter(|f| meets_threshold(&f.severity, &min_severity))
.collect();

if findings.is_empty() {
if skipped > 0 {
eprintln!(" ? No parseable findings — try `--model 3b` for better output quality.");
} else {
eprintln!(" ✓ No issues found.");
}
eprintln!();
return Ok(false);
}

match args.format {
cli::OutputFormat::Json => {
// Emit a clean JSON array — pipe-friendly for CI dashboards
let json = serde_json::to_string_pretty(&findings)
.map_err(|e| anyhow::anyhow!(e.to_string()))?;
// Emit the full summary as JSON — pipe-friendly for CI dashboards
let out = serde_json::json!({
"findings": findings,
"positives": summary.positives,
"suggestions": summary.suggestions,
});
let json =
serde_json::to_string_pretty(&out).map_err(|e| anyhow::anyhow!(e.to_string()))?;
println!("{}", json);
}
cli::OutputFormat::Text => {
println!();
for (i, f) in findings.iter().enumerate() {
print_finding(f, i + 1, findings.len());
if findings.is_empty() {
if skipped > 0 {
eprintln!(
" ? No parseable findings — try `--model 3b` for better output quality."
);
} else {
use crossterm::style::Stylize;
eprintln!(" {} No issues found.", "✓".green().bold());
}
} else {
for (i, f) in findings.iter().enumerate() {
print_finding(f, i + 1, findings.len());
}
print_summary(findings.len(), skipped);
}
print_summary(findings.len(), skipped);
print_positives_and_suggestions(&summary.positives, &summary.suggestions);
}
}

Ok(true)
Ok(!findings.is_empty())
}

// ─── Coloured finding renderer ────────────────────────────────────────────────
Expand Down Expand Up @@ -478,19 +497,45 @@ fn print_summary(count: usize, skipped: usize) {
eprintln!();
}

/// Print the "what looks good" and "suggestions" sections of a review
/// summary to stderr with coloured bullet points.
///
/// Emits nothing when both lists are empty. Exactly one horizontal
/// divider precedes whichever non-empty section comes first.
fn print_positives_and_suggestions(positives: &[String], suggestions: &[String]) {
    let has_positives = !positives.is_empty();
    let has_suggestions = !suggestions.is_empty();

    if !has_positives && !has_suggestions {
        return;
    }

    if has_positives {
        eprintln!(" {}", "─".repeat(62).dark_grey());
        eprintln!(" {} What looks good", "✓".green().bold());
        for item in positives {
            eprintln!(" {} {}", "·".green(), item);
        }
        eprintln!();
    }

    if has_suggestions {
        // The positives section (when present) already printed the divider.
        if !has_positives {
            eprintln!(" {}", "─".repeat(62).dark_grey());
        }
        eprintln!(" 💡 Suggestions");
        for item in suggestions {
            eprintln!(" {} {}", "·".dark_yellow(), item);
        }
        eprintln!();
    }
}

// ─── TUI runner ───────────────────────────────────────────────────────────────

use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use ratatui::{
Frame, Terminal,
backend::{Backend, CrosstermBackend},
layout::{Constraint, Direction, Layout},
style::{Color, Modifier, Style},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
Frame, Terminal,
};

struct App {
Expand Down Expand Up @@ -543,57 +588,60 @@ async fn run_tui(
res
}

async fn tui_loop<B: Backend>(terminal: &mut Terminal<B>, app: Arc<Mutex<App>>) -> Result<()> {
async fn tui_loop<B: Backend>(terminal: &mut Terminal<B>, app: Arc<Mutex<App>>) -> Result<()>
where
B::Error: Send + Sync + 'static,
{
loop {
{
let mut app_lock = app.lock().await;
terminal.draw(|f| ui(f, &mut app_lock))?;
}

if event::poll(Duration::from_millis(100))? {
if let Event::Key(key) = event::read()? {
let mut app_lock = app.lock().await;
match key.code {
KeyCode::Char('q') => return Ok(()),
KeyCode::Down | KeyCode::Char('j') => {
let i = match app_lock.state.selected() {
Some(i) if !app_lock.findings.is_empty() => {
(i + 1) % app_lock.findings.len()
}
_ => 0,
};
app_lock.state.select(Some(i));
}
KeyCode::Up | KeyCode::Char('k') => {
let i = match app_lock.state.selected() {
Some(i) if !app_lock.findings.is_empty() => {
if i == 0 {
app_lock.findings.len() - 1
} else {
i - 1
}
if event::poll(Duration::from_millis(100))?
&& let Event::Key(key) = event::read()?
{
let mut app_lock = app.lock().await;
match key.code {
KeyCode::Char('q') => return Ok(()),
KeyCode::Down | KeyCode::Char('j') => {
let i = match app_lock.state.selected() {
Some(i) if !app_lock.findings.is_empty() => {
(i + 1) % app_lock.findings.len()
}
_ => 0,
};
app_lock.state.select(Some(i));
}
KeyCode::Up | KeyCode::Char('k') => {
let i = match app_lock.state.selected() {
Some(i) if !app_lock.findings.is_empty() => {
if i == 0 {
app_lock.findings.len() - 1
} else {
i - 1
}
_ => 0,
};
app_lock.state.select(Some(i));
}
KeyCode::Char('a') => {
if !app_lock.analyzing {
app_lock.analyzing = true;
app_lock.status = "Analyzing...".to_string();
let app_clone = Arc::clone(&app);
tokio::spawn(async move {
let app_err = Arc::clone(&app_clone);
if let Err(e) = background_analysis(app_clone).await {
let mut app = app_err.lock().await;
app.status = format!("Error: {}", e);
app.analyzing = false;
}
});
}
_ => 0,
};
app_lock.state.select(Some(i));
}
KeyCode::Char('a') => {
if !app_lock.analyzing {
app_lock.analyzing = true;
app_lock.status = "Analyzing...".to_string();
let app_clone = Arc::clone(&app);
tokio::spawn(async move {
let app_err = Arc::clone(&app_clone);
if let Err(e) = background_analysis(app_clone).await {
let mut app = app_err.lock().await;
app.status = format!("Error: {}", e);
app.analyzing = false;
}
});
}
_ => {}
}
_ => {}
}
}
}
Expand All @@ -619,28 +667,29 @@ async fn background_analysis(app: Arc<Mutex<App>>) -> Result<()> {

let index = Indexer::load(&project_root);
let mut context = String::new();
if let Some(idx) = index {
if let Some(rag_text) = rag::get_rag_context(&diff, &idx) {
context = rag_text;
}
if let Some(idx) = index
&& let Some(rag_text) = rag::get_rag_context(&diff, &idx)
{
context = rag_text;
}

let langs = detect_languages(&diff);
let mut analyzer = ReviewAnalyzer::new(&model_bytes, &tokenizer_bytes)
.map_err(|e| anyhow::anyhow!(e.to_string()))?
.with_languages(langs);
let mut analyzer =
ReviewAnalyzer::new_with_device(&model_bytes, &tokenizer_bytes, DevicePreference::Auto)
.map_err(|e| anyhow::anyhow!(e.to_string()))?
.with_languages(langs);

if let Some(req) = ticket {
analyzer = analyzer.with_requirements(req);
}

let findings = analyzer
let summary = analyzer
.analyze_diff_chunked(&diff, &context, 1024)
.map_err(|e| anyhow::anyhow!(e.to_string()))?;

let mut app_lock = app.lock().await;
let count = findings.len();
app_lock.findings = findings;
let count = summary.findings.len();
app_lock.findings = summary.findings;
app_lock.status = format!(
"Done — {} finding{}",
count,
Expand Down
2 changes: 1 addition & 1 deletion apps/tui-cli/src/rag.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::indexer::{SymbolIndex, COMMON_KEYWORDS};
use crate::indexer::{COMMON_KEYWORDS, SymbolIndex};
use regex::Regex;

const MAX_CONTEXT_BYTES: usize = 3000;
Expand Down
Loading
Loading