Skip to content

Commit 8ab28d2

Browse files
committed
refactor(bench): change default behavior to show summary only, add --verbose flag
Changed from --quiet flag to --verbose flag with inverted logic:
- Default mode: Shows header, statistics methodology (once), summary table, and completion
- Verbose mode (--verbose/-v): Shows all individual template details with progress bars

Key changes:
- Removed repeated "Note: Latency statistics calculated from N iteration samples" from each template output
- Created print_statistics_explanation() function that displays methodology once before the summary section
- Changed --quiet/-q flag to --verbose/-v flag
- Inverted all logic: default is now minimal (old quiet), verbose shows all details
- Always show header, statistics explanation, summary table, and completion message
- Only show progress bars and individual template results in verbose mode
- Removed unused print_success() function

This provides cleaner default output while still allowing detailed analysis with --verbose.
1 parent abf8cda commit 8ab28d2

File tree

1 file changed

+68
-70
lines changed

1 file changed

+68
-70
lines changed

src/bin/bench_throughput.rs

Lines changed: 68 additions & 70 deletions
Original file line number | Diff line number | Diff line change
@@ -434,18 +434,6 @@ fn print_section_header(text: &str) {
434434
);
435435
}
436436

437-
fn print_success(msg: &str) {
438-
let mut stdout = io::stdout();
439-
let _ = execute!(
440-
stdout,
441-
SetForegroundColor(Color::Green),
442-
Print("✓ "),
443-
ResetColor,
444-
Print(msg),
445-
Print("\n")
446-
);
447-
}
448-
449437
fn print_error(msg: &str) {
450438
let mut stdout = io::stdout();
451439
let _ = execute!(
@@ -636,23 +624,30 @@ fn print_template_results(template_name: &str, results: &[BenchmarkResult]) {
636624
} else {
637625
println!(" (high - jittery)");
638626
}
639-
640-
// Formulas note
641-
println!("\n Note: Latency statistics calculated from {} iteration samples", stats.sample_count);
642-
println!(" Each sample = average time per path for one complete iteration");
643-
println!(" - Percentiles: Nearest-rank method on sorted iteration averages");
644-
println!(" p50 = value at index ceil(n × 0.50) - 1");
645-
println!(" p95 = value at index ceil(n × 0.95) - 1");
646-
println!(" p99 = value at index ceil(n × 0.99) - 1");
647-
println!(" - Consistency: p99/p50 ratio (lower = more predictable)");
648-
println!(" - Variance: (stddev/p50) × 100% (lower = more stable)");
649-
println!(" - Stddev: √(Σ(x - mean)² / n) over iteration samples");
650627
}
651628

652629
println!();
653630
}
654631
}
655632

633+
fn print_statistics_explanation(sample_count: usize) {
634+
print_header("📖 LATENCY STATISTICS METHODOLOGY");
635+
636+
println!(" Latency statistics calculated from {} iteration samples", sample_count);
637+
println!(" Each sample = average time per path for one complete iteration");
638+
println!();
639+
println!(" Statistical Methods:");
640+
println!(" - Percentiles: Nearest-rank method on sorted iteration averages");
641+
println!(" • p50 = value at index ceil(n × 0.50) - 1");
642+
println!(" • p95 = value at index ceil(n × 0.95) - 1");
643+
println!(" • p99 = value at index ceil(n × 0.99) - 1");
644+
println!();
645+
println!(" - Consistency: p99/p50 ratio (lower = more predictable)");
646+
println!(" - Variance: (stddev/p50) × 100% (lower = more stable)");
647+
println!(" - Stddev: √(Σ(x - mean)² / n) over iteration samples");
648+
println!();
649+
}
650+
656651
fn print_summary(all_results: &[(&str, Vec<BenchmarkResult>)]) {
657652
print_header("📊 SUMMARY - Performance at Largest Input Size");
658653

@@ -806,11 +801,11 @@ fn main() {
806801
.help("Output file path (for JSON format)"),
807802
)
808803
.arg(
809-
Arg::new("quiet")
810-
.short('q')
811-
.long("quiet")
804+
Arg::new("verbose")
805+
.short('v')
806+
.long("verbose")
812807
.action(clap::ArgAction::SetTrue)
813-
.help("Minimal output (only show benchmark progress lines)"),
808+
.help("Show detailed output for each template (default shows only summary)"),
814809
)
815810
.get_matches();
816811

@@ -833,64 +828,61 @@ fn main() {
833828

834829
let format = matches.get_one::<String>("format").unwrap();
835830
let output_path = matches.get_one::<String>("output");
836-
let quiet = matches.get_flag("quiet");
831+
let verbose = matches.get_flag("verbose");
837832

838833
if sizes.is_empty() {
839834
eprintln!("Error: At least one input size is required");
840835
std::process::exit(1);
841836
}
842837

843-
if !quiet {
844-
print_header("String Pipeline Throughput Benchmark v0.13.0");
845-
let mut stdout = io::stdout();
846-
let _ = execute!(
847-
stdout,
848-
Print("Measuring batch processing performance with varying input sizes\n"),
849-
Print("Pattern: Parse and format N paths with M iterations for stability\n\n"),
850-
SetForegroundColor(Color::Cyan),
851-
Print("Input sizes: "),
852-
ResetColor,
853-
Print(format!(
854-
"{:?}\n",
855-
sizes.iter().map(|s| format_size(*s)).collect::<Vec<_>>()
856-
)),
857-
SetForegroundColor(Color::Cyan),
858-
Print("Measurement iterations: "),
859-
ResetColor,
860-
Print(format!("{}\n", iterations)),
861-
SetForegroundColor(Color::Cyan),
862-
Print("Output format: "),
863-
ResetColor,
864-
Print(format!("{}\n", format))
865-
);
866-
}
838+
// Always show header
839+
print_header("String Pipeline Throughput Benchmark v0.13.0");
840+
let mut stdout = io::stdout();
841+
let _ = execute!(
842+
stdout,
843+
Print("Measuring batch processing performance with varying input sizes\n"),
844+
Print("Pattern: Parse and format N paths with M iterations for stability\n\n"),
845+
SetForegroundColor(Color::Cyan),
846+
Print("Input sizes: "),
847+
ResetColor,
848+
Print(format!(
849+
"{:?}\n",
850+
sizes.iter().map(|s| format_size(*s)).collect::<Vec<_>>()
851+
)),
852+
SetForegroundColor(Color::Cyan),
853+
Print("Measurement iterations: "),
854+
ResetColor,
855+
Print(format!("{}\n", iterations)),
856+
SetForegroundColor(Color::Cyan),
857+
Print("Output format: "),
858+
ResetColor,
859+
Print(format!("{}\n", format))
860+
);
867861

868862
let templates = TemplateSet::get_templates();
869863
let mut all_results = Vec::new();
870864
let total_templates = templates.len();
871865

872866
for (idx, (template_name, template_str)) in templates.iter().enumerate() {
873-
if !quiet {
867+
if verbose {
874868
print_progress_bar(idx + 1, total_templates, template_name);
875869
}
876870

877871
match benchmark_template(template_name, template_str, &sizes, iterations) {
878872
Ok(results) => {
879-
if !quiet {
873+
if verbose {
880874
let mut stdout = io::stdout();
881875
let _ = execute!(
882876
stdout,
883877
cursor::MoveToColumn(0),
884878
Clear(ClearType::CurrentLine)
885879
);
886880
print_template_results(template_name, &results);
887-
} else {
888-
print_success(&format!("Benchmarking '{}'", template_name));
889881
}
890882
all_results.push((*template_name, results));
891883
}
892884
Err(e) => {
893-
if !quiet {
885+
if verbose {
894886
let mut stdout = io::stdout();
895887
let _ = execute!(
896888
stdout,
@@ -903,9 +895,16 @@ fn main() {
903895
}
904896
}
905897

906-
if !quiet {
907-
print_summary(&all_results);
908-
}
898+
// Get iteration count from first template for statistics explanation
899+
let sample_count = if !all_results.is_empty() && !all_results[0].1.is_empty() {
900+
all_results[0].1[0].latency_stats.sample_count
901+
} else {
902+
iterations
903+
};
904+
905+
// Always show statistics explanation and summary
906+
print_statistics_explanation(sample_count);
907+
print_summary(&all_results);
909908

910909
if format == "json"
911910
&& let Err(e) = output_json(&all_results, output_path.map(|s| s.as_str()))
@@ -914,14 +913,13 @@ fn main() {
914913
std::process::exit(1);
915914
}
916915

917-
if !quiet {
918-
let mut stdout = io::stdout();
919-
let _ = execute!(
920-
stdout,
921-
SetForegroundColor(Color::Green),
922-
SetAttribute(Attribute::Bold),
923-
Print("✓ Benchmark complete!\n"),
924-
ResetColor
925-
);
926-
}
916+
// Always show completion message
917+
let mut stdout = io::stdout();
918+
let _ = execute!(
919+
stdout,
920+
SetForegroundColor(Color::Green),
921+
SetAttribute(Attribute::Bold),
922+
Print("✓ Benchmark complete!\n"),
923+
ResetColor
924+
);
927925
}

0 commit comments

Comments (0)