@@ -607,26 +607,32 @@ def determine_best_candidate(
                     original_async_throughput=original_code_baseline.async_throughput,
                     best_throughput_until_now=None,
                 ) and quantity_of_tests_critic(candidate_result):
-                    tree.add("This candidate is faster than the original code. 🚀")  # TODO: Change this description
-                    tree.add(f"Original summed runtime: {humanize_runtime(original_code_baseline.runtime)}")
-                    tree.add(
-                        f"Best summed runtime: {humanize_runtime(candidate_result.best_test_runtime)} "
-                        f"(measured over {candidate_result.max_loop_count} "
-                        f"loop{'s' if candidate_result.max_loop_count > 1 else ''})"
-                    )
-                    tree.add(f"Speedup percentage: {perf_gain * 100:.1f}%")
-                    tree.add(f"Speedup ratio: {perf_gain + 1:.3f}X")
-                    if (
+                    # For async functions, prioritize throughput metrics over runtime
+                    is_async = (
                         original_code_baseline.async_throughput is not None
                         and candidate_result.async_throughput is not None
-                    ):
+                    )
+
+                    if is_async:
                         throughput_gain_value = throughput_gain(
                             original_throughput=original_code_baseline.async_throughput,
                             optimized_throughput=candidate_result.async_throughput,
                         )
+                        tree.add("This candidate has better async throughput than the original code. 🚀")
                         tree.add(f"Original async throughput: {original_code_baseline.async_throughput} executions")
                         tree.add(f"Optimized async throughput: {candidate_result.async_throughput} executions")
                         tree.add(f"Throughput improvement: {throughput_gain_value * 100:.1f}%")
+                        tree.add(f"Throughput ratio: {throughput_gain_value + 1:.3f}X")
+                    else:
+                        tree.add("This candidate is faster than the original code. 🚀")
+                        tree.add(f"Original summed runtime: {humanize_runtime(original_code_baseline.runtime)}")
+                        tree.add(
+                            f"Best summed runtime: {humanize_runtime(candidate_result.best_test_runtime)} "
+                            f"(measured over {candidate_result.max_loop_count} "
+                            f"loop{'s' if candidate_result.max_loop_count > 1 else ''})"
+                        )
+                        tree.add(f"Speedup percentage: {perf_gain * 100:.1f}%")
+                        tree.add(f"Speedup ratio: {perf_gain + 1:.3f}X")
                     line_profile_test_results = self.line_profiler_step(
                         code_context=code_context,
                         original_helper_code=original_helper_code,
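A note on the ratio line added above (and the mirrored reporting in the next hunk): it assumes `throughput_gain` returns a relative improvement, so `gain + 1` is the X-factor, exactly like `perf_gain` on the runtime side. A minimal sketch of that convention; the definition below is an assumption about the helper's contract, not the actual implementation imported by this module:

```python
# Sketch of the gain/ratio convention the new report lines rely on.
# This throughput_gain definition is an assumption, not the real helper.
def throughput_gain(original_throughput: int, optimized_throughput: int) -> float:
    """Relative improvement: 0.25 means 25% more executions in the same window."""
    return (optimized_throughput - original_throughput) / original_throughput

gain = throughput_gain(original_throughput=400, optimized_throughput=500)
print(f"Throughput improvement: {gain * 100:.1f}%")  # 25.0%
print(f"Throughput ratio: {gain + 1:.3f}X")          # 1.250X
```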
@@ -681,22 +687,31 @@ def determine_best_candidate(
                         )
                     )
                 else:
-                    tree.add(
-                        f"Summed runtime: {humanize_runtime(best_test_runtime)} "
-                        f"(measured over {candidate_result.max_loop_count} "
-                        f"loop{'s' if candidate_result.max_loop_count > 1 else ''})"
-                    )
-                    tree.add(f"Speedup percentage: {perf_gain * 100:.1f}%")
-                    tree.add(f"Speedup ratio: {perf_gain + 1:.3f}X")
-                    if (
+                    # For async functions, prioritize throughput metrics over runtime even for slow candidates
+                    is_async = (
                         original_code_baseline.async_throughput is not None
                         and candidate_result.async_throughput is not None
-                    ):
+                    )
+
+                    if is_async:
                         throughput_gain_value = throughput_gain(
                             original_throughput=original_code_baseline.async_throughput,
                             optimized_throughput=candidate_result.async_throughput,
                         )
-                        tree.add(f"Throughput gain: {throughput_gain_value * 100:.1f}%")
+                        tree.add(f"Async throughput: {candidate_result.async_throughput} executions")
+                        tree.add(f"Throughput change: {throughput_gain_value * 100:.1f}%")
+                        tree.add(
+                            f"(Runtime for reference: {humanize_runtime(best_test_runtime)} over "
+                            f"{candidate_result.max_loop_count} loop{'s' if candidate_result.max_loop_count > 1 else ''})"
+                        )
+                    else:
+                        tree.add(
+                            f"Summed runtime: {humanize_runtime(best_test_runtime)} "
+                            f"(measured over {candidate_result.max_loop_count} "
+                            f"loop{'s' if candidate_result.max_loop_count > 1 else ''})"
+                        )
+                        tree.add(f"Speedup percentage: {perf_gain * 100:.1f}%")
+                        tree.add(f"Speedup ratio: {perf_gain + 1:.3f}X")
 
         if is_LSP_enabled():
             lsp_log(LspMarkdownMessage(markdown=tree_to_markdown(tree)))
@@ -1502,16 +1517,21 @@ def process_review(
         raise_pr = not self.args.no_pr
         staging_review = self.args.staging_review
         opt_review_response = ""
-        if raise_pr or staging_review:
+        # Skip optimization review for async functions for now
+        if (raise_pr or staging_review) and not self.function_to_optimize.is_async:
             data["root_dir"] = git_root_dir()
             try:
                 opt_review_response = self.aiservice_client.get_optimization_review(
                     **data, calling_fn_details=function_references
                 )
             except Exception as e:
                 logger.debug(f"optimization review response failed, investigate {e}")
-            data["optimization_review"] = opt_review_response
+        # Always set optimization_review in data (empty string for async functions)
+        data["optimization_review"] = opt_review_response
         if raise_pr and not staging_review and opt_review_response != "low":
+            # Ensure root_dir is set for PR creation (needed for async functions that skip opt_review)
+            if "root_dir" not in data:
+                data["root_dir"] = git_root_dir()
             data["git_remote"] = self.args.git_remote
             check_create_pr(**data)
         elif staging_review:
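The guard added before `check_create_pr` exists because async functions now skip the review branch where `data["root_dir"]` used to be set; without it, PR creation would hit a missing key. The `if "root_dir" not in data` check is equivalent to a `setdefault`. A standalone sketch of the resulting flow, with stub values standing in for `git_root_dir()` and the aiservice review call:

```python
# Standalone sketch of the control flow in this hunk; stub values stand in
# for git_root_dir() and the elided aiservice review call.
def process_review_flow(raise_pr: bool, staging_review: bool, is_async: bool) -> dict:
    data: dict = {}
    opt_review_response = ""
    if (raise_pr or staging_review) and not is_async:
        data["root_dir"] = "/repo"      # git_root_dir() in the real code
        opt_review_response = "high"    # review call elided
    data["optimization_review"] = opt_review_response  # always set; "" for async
    if raise_pr and not staging_review and opt_review_response != "low":
        # Async functions never entered the branch above, so backfill root_dir.
        data.setdefault("root_dir", "/repo")
    return data

assert process_review_flow(raise_pr=True, staging_review=False, is_async=True)["root_dir"] == "/repo"
```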
@@ -1579,15 +1599,11 @@ def establish_original_code_baseline(
         test_env = self.get_test_env(codeflash_loop_index=0, codeflash_test_iteration=0, codeflash_tracer_disable=1)
 
         if self.function_to_optimize.is_async:
-            from codeflash.code_utils.instrument_existing_tests import instrument_source_module_with_async_decorators
+            from codeflash.code_utils.instrument_existing_tests import add_async_decorator_to_function
 
-            success, instrumented_source = instrument_source_module_with_async_decorators(
+            success = add_async_decorator_to_function(
                 self.function_to_optimize.file_path, self.function_to_optimize, TestingMode.BEHAVIOR
             )
-            if success and instrumented_source:
-                with self.function_to_optimize.file_path.open("w", encoding="utf8") as f:
-                    f.write(instrumented_source)
-                logger.debug(f"Applied async instrumentation to {self.function_to_optimize.file_path}")
 
         # Instrument codeflash capture
         with progress_bar("Running tests to establish original code behavior..."):
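This hunk and the three that follow are the same refactor: `add_async_decorator_to_function` now writes the instrumented module itself and returns a success flag, so the open/write/`logger.debug` boilerplate disappears from every call site. A hypothetical sketch of that contract with a simplified signature (the real helper takes the FunctionToOptimize and a TestingMode, and its decorator wiring is more involved):

```python
# Hypothetical sketch of the new helper's contract; simplified signature, not
# the real codeflash.code_utils.instrument_existing_tests implementation.
import ast
from pathlib import Path

def add_async_decorator_to_function(file_path: Path, function_name: str, decorator_name: str) -> bool:
    """Decorate the named async def in place; return True if the file was rewritten."""
    tree = ast.parse(file_path.read_text(encoding="utf8"))
    changed = False
    for node in ast.walk(tree):
        if isinstance(node, ast.AsyncFunctionDef) and node.name == function_name:
            node.decorator_list.insert(0, ast.Name(id=decorator_name, ctx=ast.Load()))
            changed = True
    if changed:
        # The helper, not the caller, persists the instrumented source now.
        file_path.write_text(ast.unparse(ast.fix_missing_locations(tree)), encoding="utf8")
    return changed
```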
@@ -1632,19 +1648,11 @@ def establish_original_code_baseline(
             console.rule()
             with progress_bar("Running performance benchmarks..."):
                 if self.function_to_optimize.is_async:
-                    from codeflash.code_utils.instrument_existing_tests import (
-                        instrument_source_module_with_async_decorators,
-                    )
+                    from codeflash.code_utils.instrument_existing_tests import add_async_decorator_to_function
 
-                    success, instrumented_source = instrument_source_module_with_async_decorators(
+                    add_async_decorator_to_function(
                         self.function_to_optimize.file_path, self.function_to_optimize, TestingMode.PERFORMANCE
                     )
-                    if success and instrumented_source:
-                        with self.function_to_optimize.file_path.open("w", encoding="utf8") as f:
-                            f.write(instrumented_source)
-                        logger.debug(
-                            f"Applied async performance instrumentation to {self.function_to_optimize.file_path}"
-                        )
 
                 try:
                     benchmarking_results, _ = self.run_and_parse_tests(
@@ -1767,19 +1775,11 @@ def run_optimized_candidate(
             for module_abspath in original_helper_code:
                 candidate_helper_code[module_abspath] = Path(module_abspath).read_text("utf-8")
             if self.function_to_optimize.is_async:
-                from codeflash.code_utils.instrument_existing_tests import (
-                    instrument_source_module_with_async_decorators,
-                )
+                from codeflash.code_utils.instrument_existing_tests import add_async_decorator_to_function
 
-                success, instrumented_source = instrument_source_module_with_async_decorators(
+                add_async_decorator_to_function(
                     self.function_to_optimize.file_path, self.function_to_optimize, TestingMode.BEHAVIOR
                 )
-                if success and instrumented_source:
-                    with self.function_to_optimize.file_path.open("w", encoding="utf8") as f:
-                        f.write(instrumented_source)
-                    logger.debug(
-                        f"Applied async behavioral instrumentation to {self.function_to_optimize.file_path} for candidate {optimization_candidate_index}"
-                    )
 
             try:
                 instrument_codeflash_capture(
@@ -1820,19 +1820,11 @@ def run_optimized_candidate(
             if test_framework == "pytest":
                 # For async functions, instrument at definition site for performance benchmarking
                 if self.function_to_optimize.is_async:
-                    from codeflash.code_utils.instrument_existing_tests import (
-                        instrument_source_module_with_async_decorators,
-                    )
+                    from codeflash.code_utils.instrument_existing_tests import add_async_decorator_to_function
 
-                    success, instrumented_source = instrument_source_module_with_async_decorators(
+                    add_async_decorator_to_function(
                         self.function_to_optimize.file_path, self.function_to_optimize, TestingMode.PERFORMANCE
                     )
-                    if success and instrumented_source:
-                        with self.function_to_optimize.file_path.open("w", encoding="utf8") as f:
-                            f.write(instrumented_source)
-                        logger.debug(
-                            f"Applied async performance instrumentation to {self.function_to_optimize.file_path} for candidate {optimization_candidate_index}"
-                        )
 
                 try:
                     candidate_benchmarking_results, _ = self.run_and_parse_tests(