 from codeflash.code_utils.config_consts import (
     COVERAGE_THRESHOLD,
     INDIVIDUAL_TESTCASE_TIMEOUT,
-    N_CANDIDATES,
-    N_TESTS_TO_GENERATE,
     REPEAT_OPTIMIZATION_PROBABILITY,
-    TOTAL_LOOPING_TIME,
+    get_n_candidates,
+    get_n_candidates_lp,
+    get_n_tests_to_generate,
+    get_total_looping_time,
 )
 from codeflash.code_utils.deduplicate_code import normalize_code
 from codeflash.code_utils.edit_generated_tests import (
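This change replaces the module-level constants N_CANDIDATES, N_TESTS_TO_GENERATE, and TOTAL_LOOPING_TIME with getter functions, so each value is resolved at call time instead of being frozen at import. The getter bodies are not part of this diff; the sketch below shows one plausible shape, assuming environment-variable overrides with hard-coded fallbacks. Only the function names come from the diff; the variable names and default values are assumptions for illustration.

# Hypothetical sketch of the new accessors in codeflash/code_utils/config_consts.py.
import os

def get_n_tests_to_generate() -> int:
    return int(os.environ.get("CODEFLASH_N_TESTS_TO_GENERATE", "2"))

def get_n_candidates() -> int:
    return int(os.environ.get("CODEFLASH_N_CANDIDATES", "5"))

def get_n_candidates_lp() -> int:
    # Candidate count for the line-profiler-informed request; replaces the
    # hardcoded num_candidates=10 seen later in this diff.
    return int(os.environ.get("CODEFLASH_N_CANDIDATES_LP", "10"))

def get_total_looping_time() -> float:
    return float(os.environ.get("CODEFLASH_TOTAL_LOOPING_TIME", "10.0"))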
@@ -236,8 +237,9 @@ def __init__(
         self.generate_and_instrument_tests_results: (
             tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet] | None
         ) = None
+        n_tests = get_n_tests_to_generate()
         self.executor = concurrent.futures.ThreadPoolExecutor(
-            max_workers=N_TESTS_TO_GENERATE + 2 if self.experiment_id is None else N_TESTS_TO_GENERATE + 3
+            max_workers=n_tests + 2 if self.experiment_id is None else n_tests + 3
         )

     def can_be_optimized(self) -> Result[tuple[bool, CodeOptimizationContext, dict[Path, str]], str]:
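The pool size appears to track the number of futures submitted at once in generate_tests_and_optimizations further down: presumably n_tests test-generation tasks plus one optimization-candidates request, plus a second candidates request in experiment mode, which would explain the +2 / +3. Sizing matters for latency because a ThreadPoolExecutor queues, rather than rejects, work beyond max_workers. A quick self-contained demonstration:

import concurrent.futures
import time

def task(i: int) -> int:
    time.sleep(0.1)  # stand-in for a test-generation or optimization request
    return i

# Four 0.1 s tasks: with 2 workers they run in two waves (~0.2 s total),
# with 4 workers they all run in parallel (~0.1 s total).
for workers in (2, 4):
    start = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool:
        results = list(pool.map(task, range(4)))
    print(f"{workers} workers -> {time.time() - start:.2f}s, results={results}")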
@@ -287,17 +289,18 @@ def generate_and_instrument_tests(
             ]
         ]:
         """Generate and instrument tests, returning all necessary data for optimization."""
+        n_tests = get_n_tests_to_generate()
         generated_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="unit"
             )
-            for test_index in range(N_TESTS_TO_GENERATE)
+            for test_index in range(n_tests)
         ]
         generated_perf_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="perf"
             )
-            for test_index in range(N_TESTS_TO_GENERATE)
+            for test_index in range(n_tests)
         ]

         with progress_bar(
@@ -484,7 +487,7 @@ def determine_best_candidate(
                 dependency_code=code_context.read_only_context_code,
                 trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
                 line_profiler_results=original_code_baseline.line_profile_results["str_out"],
-                num_candidates=10,
+                num_candidates=get_n_candidates_lp(),
                 experiment_metadata=ExperimentMetadata(
                     id=self.experiment_id, group="control" if exp_type == "EXP0" else "experiment"
                 )
@@ -1058,7 +1061,8 @@ def generate_tests_and_optimizations(
         generated_perf_test_paths: list[Path],
         run_experiment: bool = False,  # noqa: FBT001, FBT002
     ) -> Result[tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet], str]:
-        assert len(generated_test_paths) == N_TESTS_TO_GENERATE
+        n_tests = get_n_tests_to_generate()
+        assert len(generated_test_paths) == n_tests
         console.rule()
         # Submit the test generation task as future
         future_tests = self.submit_test_generation_tasks(
@@ -1068,12 +1072,13 @@ def generate_tests_and_optimizations(
             generated_test_paths,
             generated_perf_test_paths,
         )
+        n_candidates = get_n_candidates()
         future_optimization_candidates = self.executor.submit(
             self.aiservice_client.optimize_python_code,
             read_writable_code.markdown,
             read_only_context_code,
             self.function_trace_id[:-4] + "EXP0" if run_experiment else self.function_trace_id,
-            N_CANDIDATES,
+            n_candidates,
             ExperimentMetadata(id=self.experiment_id, group="control") if run_experiment else None,
         )
         future_candidates_exp = None
@@ -1088,7 +1093,7 @@ def generate_tests_and_optimizations(
                 read_writable_code.markdown,
                 read_only_context_code,
                 self.function_trace_id[:-4] + "EXP1",
-                N_CANDIDATES,
+                n_candidates,
                 ExperimentMetadata(id=self.experiment_id, group="experiment"),
             )
             futures.append(future_candidates_exp)
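For orientation, this method fans all of its work through the shared executor: the test-generation futures, one candidates request, and a second candidates request when run_experiment is set. A minimal self-contained sketch of the same fan-out-and-join pattern; the worker functions here are hypothetical stand-ins, not the codeflash API:

import concurrent.futures
import time

def generate_tests() -> str:
    time.sleep(0.1)  # stand-in for the instrumented test-generation task
    return "tests"

def request_candidates(group: str) -> str:
    time.sleep(0.1)  # stand-in for aiservice_client.optimize_python_code
    return f"candidates:{group}"

def fan_out(pool: concurrent.futures.ThreadPoolExecutor, run_experiment: bool) -> list[str]:
    futures = [pool.submit(generate_tests), pool.submit(request_candidates, "control")]
    if run_experiment:
        futures.append(pool.submit(request_candidates, "experiment"))
    # result() blocks until each future finishes and re-raises any worker exception.
    return [f.result() for f in futures]

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    print(fan_out(pool, run_experiment=True))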
@@ -1477,12 +1482,13 @@ def establish_original_code_baseline(
                 instrument_codeflash_capture(
                     self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
                 )
+            total_looping_time = get_total_looping_time()
             behavioral_results, coverage_results = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=0,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=test_framework == "pytest",
                 code_context=code_context,
             )
@@ -1503,7 +1509,7 @@ def establish_original_code_baseline(
             )

             if test_framework == "pytest":
-                with progress_bar("Performing detailed line profiling..."):
+                with progress_bar("Running line profiling to identify performance bottlenecks..."):
                     line_profile_results = self.line_profiler_step(
                         code_context=code_context, original_helper_code=original_helper_code, candidate_index=0
                     )
@@ -1514,15 +1520,15 @@ def establish_original_code_baseline(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=0,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=False,
                 code_context=code_context,
             )
         else:
             benchmarking_results = TestResults()
             start_time: float = time.time()
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME * 1.5:
+                if i >= 5 and time.time() - start_time >= total_looping_time * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
@@ -1532,7 +1538,7 @@ def establish_original_code_baseline(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=0,
-                    testing_time=TOTAL_LOOPING_TIME,
+                    testing_time=total_looping_time,
                     enable_coverage=False,
                     code_context=code_context,
                     unittest_loop_index=i + 1,
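The unittest fallback above benchmarks with a bounded loop: at most 100 iterations, never fewer than 5, and an early exit once elapsed time passes 1.5x the looping budget. A self-contained sketch of that pattern, with a stand-in workload in place of the real test run:

import time

def one_iteration() -> None:
    # Stand-in for a single run_and_parse_tests(...) benchmarking pass.
    time.sleep(0.01)

def benchmark(budget_s: float, min_loops: int = 5, max_loops: int = 100) -> int:
    start = time.time()
    loops = 0
    for i in range(max_loops):
        # Past the minimum loop count, stop once 1.5x the budget has elapsed;
        # the extra 0.5x mirrors the "give unittest a bit more time" comment.
        if i >= min_loops and time.time() - start >= budget_s * 1.5:
            break
        one_iteration()
        loops = i + 1
    return loops

print(benchmark(budget_s=0.1))  # roughly 15 iterations on a ~10 ms workload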
@@ -1617,12 +1623,13 @@ def run_optimized_candidate(
                 self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
             )

+            total_looping_time = get_total_looping_time()
             candidate_behavior_results, _ = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=optimization_candidate_index,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=False,
             )
             # Remove instrumentation
@@ -1653,7 +1660,7 @@ def run_optimized_candidate(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=optimization_candidate_index,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=total_looping_time,
                 enable_coverage=False,
             )
             loop_count = (
@@ -1671,7 +1678,7 @@ def run_optimized_candidate(
             start_time: float = time.time()
             loop_count = 0
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME * 1.5:
+                if i >= 5 and time.time() - start_time >= get_total_looping_time() * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
@@ -1680,7 +1687,7 @@ def run_optimized_candidate(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=optimization_candidate_index,
-                    testing_time=TOTAL_LOOPING_TIME,
+                    testing_time=get_total_looping_time(),
                     unittest_loop_index=i + 1,
                 )
                 loop_count = i + 1
@@ -1719,7 +1726,7 @@ def run_and_parse_tests(
         test_env: dict[str, str],
         test_files: TestFiles,
         optimization_iteration: int,
-        testing_time: float = TOTAL_LOOPING_TIME,
+        testing_time: float = get_total_looping_time(),
         *,
         enable_coverage: bool = False,
         pytest_min_loops: int = 5,
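One subtlety in this hunk: Python evaluates a default argument once, when the def statement executes, so testing_time: float = get_total_looping_time() captures whatever the getter returns at import time. Call sites that pass testing_time explicitly (as the others in this diff do) still see later configuration changes. A demonstration using a hypothetical looping_time getter and LOOPING_TIME variable:

import os

def looping_time() -> float:
    # Hypothetical getter standing in for get_total_looping_time().
    return float(os.environ.get("LOOPING_TIME", "10"))

def run(testing_time: float = looping_time()) -> float:
    # The default above was computed once, when this def executed.
    return testing_time

os.environ["LOOPING_TIME"] = "20"
print(run())                # 10.0 -- the default captured the value at definition time
print(run(looping_time()))  # 20.0 -- an explicit argument re-reads the configuration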
@@ -1858,6 +1865,9 @@ def line_profiler_step(
         self, code_context: CodeOptimizationContext, original_helper_code: dict[Path, str], candidate_index: int
     ) -> dict:
         try:
+            logger.info("Running line profiling to identify performance bottlenecks…")
+            console.rule()
+
             test_env = self.get_test_env(
                 codeflash_loop_index=0, codeflash_test_iteration=candidate_index, codeflash_tracer_disable=1
             )
@@ -1867,7 +1877,7 @@ def line_profiler_step(
                 test_env=test_env,
                 test_files=self.test_files,
                 optimization_iteration=0,
-                testing_time=TOTAL_LOOPING_TIME,
+                testing_time=get_total_looping_time(),
                 enable_coverage=False,
                 code_context=code_context,
                 line_profiler_output_file=line_profiler_output_file,