@@ -551,10 +551,6 @@ def determine_best_candidate(
                     )
                     speedup_ratios[candidate.optimization_id] = perf_gain

-                    speedup_stats = compare_function_runtime_distributions(
-                        original_code_runtime_distribution, candidate_runtime_distribution
-                    )
-
                     tree = Tree(f"Candidate #{candidate_index} - Sum of Minimum Runtimes")
                     if speedup_critic(
                         candidate_result, original_code_baseline.runtime, best_runtime_until_now
@@ -588,28 +584,33 @@ def determine_best_candidate(
                     console.print(tree)
                     console.rule()

-                    tree = Tree(f"Candidate #{candidate_index} - Bayesian Bootstrapping Nonparametric Analysis")
-                    tree.add(
-                        f"Expected candidate runtime (95% Credible Interval) = ["
-                        f"{humanize_runtime(candidate_runtime_statistics['credible_interval_lower_bound'])}, "
-                        f"{humanize_runtime(candidate_runtime_statistics['credible_interval_upper_bound'])}], "
-                        f"\nmedian = {humanize_runtime(candidate_runtime_statistics['median'])}"
-                        f"\nSpeedup ratio of candidate vs original:"
-                        f"\n95% Credible Interval = [{speedup_stats['credible_interval_lower_bound']:.3f}X, "
-                        f"{speedup_stats['credible_interval_upper_bound']:.3f}X]"
-                        f"\nmedian = {speedup_stats['median']:.3f}X"
-                    )
-                    if speedup_stats["credible_interval_lower_bound"] > 1.0:
-                        tree.add("The candidate is faster than the original code with a 95% probability.")
-                        if speedup_stats["median"] > best_speedup_ratio_until_now:
-                            best_speedup_ratio_until_now = speedup_stats["median"]
-                            tree.add("This candidate is the best candidate so far.")
+                    if candidate_runtime_distribution.any() and candidate_runtime_statistics:
+                        speedup_stats = compare_function_runtime_distributions(
+                            original_code_runtime_distribution, candidate_runtime_distribution
+                        )
+                        tree = Tree(f"Candidate #{candidate_index} - Bayesian Bootstrapping Nonparametric Analysis")
+                        tree.add(
+                            f"Expected candidate summed runtime (95% Credible Interval) = ["
+                            f"{humanize_runtime(round(candidate_runtime_statistics['credible_interval_lower_bound']))}"
+                            f", "
+                            f"{humanize_runtime(round(candidate_runtime_statistics['credible_interval_upper_bound']))}]"
+                            f"\nMedian = {humanize_runtime(round(candidate_runtime_statistics['median']))}"
+                            f"\nSpeedup ratio of candidate vs original:"
+                            f"\n95% Credible Interval = [{speedup_stats['credible_interval_lower_bound']:.3f}X, "
+                            f"{speedup_stats['credible_interval_upper_bound']:.3f}X]"
+                            f"\nmedian = {speedup_stats['median']:.3f}X"
+                        )
+                        if speedup_stats["credible_interval_lower_bound"] > 1.0:
+                            tree.add("The candidate is faster than the original code with a 95% probability.")
+                            if speedup_stats["median"] > best_speedup_ratio_until_now:
+                                best_speedup_ratio_until_now = float(speedup_stats["median"])
+                                tree.add("This candidate is the best candidate so far.")
+                            else:
+                                tree.add("This candidate is not faster than the current fastest candidate.")
                         else:
-                        tree.add("This candidate is not faster than the current fastest candidate.")
-                    else:
-                        tree.add("It is inconclusive whether the candidate is faster than the original code.")
-                    console.print(tree)
-                    console.rule()
+                            tree.add("It is inconclusive whether the candidate is faster than the original code.")
+                        console.print(tree)
+                        console.rule()

                 self.write_code_and_helpers(original_code, original_helper_code, function_to_optimize.file_path)
         except KeyboardInterrupt as e:
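Note: compare_function_runtime_distributions is defined elsewhere in the codebase and is not part of this diff. As a rough illustration of the comparison the guarded block above relies on, a speedup posterior can be formed element-wise from the two bootstrap runtime distributions and summarized with percentile credible intervals. The helper name, the original/candidate ratio convention, and the 2.5/97.5 percentile bounds below are assumptions for this sketch, not the project's actual implementation.

import numpy as np
import numpy.typing as npt


def compare_runtime_distributions_sketch(
    original: npt.NDArray[np.float64], candidate: npt.NDArray[np.float64]
) -> dict[str, float]:
    # Element-wise posterior of the speedup ratio; values > 1.0 favor the candidate.
    speedup = original / candidate
    lower, median, upper = np.percentile(speedup, [2.5, 50.0, 97.5])
    return {
        "credible_interval_lower_bound": float(lower),
        "median": float(median),
        "credible_interval_upper_bound": float(upper),
    }

Under this reading, a lower credible bound above 1.0 is exactly the condition the diff uses to declare the candidate faster with 95% probability.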
@@ -1054,9 +1055,6 @@ def establish_original_code_baseline(
         console.rule()

         total_timing = benchmarking_results.total_passed_runtime()  # caution: doesn't handle the loop index
-        runtime_distribution, runtime_statistics = benchmarking_results.bayesian_nonparametric_bootstrap_analysis(
-            100_000
-        )
         functions_to_remove = [
             result.id.test_function_name
             for result in behavioral_results
@@ -1090,9 +1088,12 @@ def establish_original_code_baseline(
         console.rule()
         logger.debug(f"Total original code summed runtime (ns): {total_timing}")
         console.rule()
+        runtime_distribution, runtime_statistics = benchmarking_results.bayesian_nonparametric_bootstrap_analysis(
+            100_000
+        )
         logger.info(
             f"Bayesian Bootstrapping Nonparametric Analysis"
-            f"\nExpected original code runtime (95% Credible Interval) = ["
+            f"\nExpected original code summed runtime (95% Credible Interval) = ["
             f"{humanize_runtime(round(runtime_statistics['credible_interval_lower_bound']))}, "
             f"{humanize_runtime(round(runtime_statistics['credible_interval_upper_bound']))}], "
             f"\nmedian: {humanize_runtime(round(runtime_statistics['median']))}"
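For context on what bayesian_nonparametric_bootstrap_analysis(100_000) is expected to return here (a posterior runtime distribution plus a statistics dict with credible_interval_lower_bound, credible_interval_upper_bound, and median), a standard Bayesian bootstrap over the per-test runtimes would look roughly like the sketch below. The function name, Dirichlet weighting, and 95% interval bounds are illustrative assumptions, not the project's actual implementation.

import numpy as np
import numpy.typing as npt


def bayesian_bootstrap_sketch(
    runtimes_ns: npt.NDArray[np.float64], n_resamples: int = 100_000
) -> tuple[npt.NDArray[np.float64], dict[str, float]]:
    # Bayesian bootstrap (Rubin, 1981): Dirichlet(1, ..., 1) weights over the observed
    # per-test runtimes yield posterior draws of the summed runtime.
    n = runtimes_ns.shape[0]
    rng = np.random.default_rng()
    weights = rng.dirichlet(np.ones(n), size=n_resamples)  # shape (n_resamples, n)
    posterior = (weights @ runtimes_ns) * n  # weighted mean * n ~ posterior of the sum
    lower, median, upper = np.percentile(posterior, [2.5, 50.0, 97.5])
    statistics = {
        "credible_interval_lower_bound": float(lower),
        "median": float(median),
        "credible_interval_upper_bound": float(upper),
    }
    return posterior, statistics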
@@ -1196,18 +1197,23 @@ def run_optimized_candidate(
         if (total_candidate_timing := candidate_benchmarking_results.total_passed_runtime()) == 0:
             logger.warning("The overall test runtime of the optimized function is 0, couldn't run tests.")
             console.rule()
-        runtime_distribution, runtime_statistics = (
-            candidate_benchmarking_results.bayesian_nonparametric_bootstrap_analysis(100_000)
-        )
-
-        logger.debug(f"Total optimized code {optimization_candidate_index} runtime (ns): {total_candidate_timing}")
-        console.rule()
-        logger.debug(
-            f"Overall code runtime (95% Credible Interval) = ["
-            f"{humanize_runtime(round(runtime_statistics['credible_interval_lower_bound']))}, "
-            f"{humanize_runtime(round(runtime_statistics['credible_interval_upper_bound']))}], median: "
-            f"{humanize_runtime(round(runtime_statistics['median']))}"
-        )
+            runtime_distribution: npt.NDArray[np.float64] = np.array([])
+            runtime_statistics: dict[str, np.float64] = {}
+        else:
+            logger.debug(
+                f"Total optimized code {optimization_candidate_index} runtime (ns): {total_candidate_timing}"
+            )
+            console.rule()
+            runtime_distribution, runtime_statistics = (
+                candidate_benchmarking_results.bayesian_nonparametric_bootstrap_analysis(100_000)
+            )
+            logger.debug(
+                f"Overall code summed runtime (95% Credible Interval) = ["
+                f"{humanize_runtime(round(runtime_statistics['credible_interval_lower_bound']))}, "
+                f"{humanize_runtime(round(runtime_statistics['credible_interval_upper_bound']))}], median: "
+                f"{humanize_runtime(round(runtime_statistics['median']))}"
+            )
+            console.rule()
         return Success(
             (
                 OptimizedCandidateResult(