@@ -44,11 +44,11 @@
 from codeflash.code_utils.config_consts import (
     COVERAGE_THRESHOLD,
     INDIVIDUAL_TESTCASE_TIMEOUT,
+    N_CANDIDATES_EFFECTIVE,
+    N_CANDIDATES_LP_EFFECTIVE,
+    N_TESTS_TO_GENERATE_EFFECTIVE,
     REPEAT_OPTIMIZATION_PROBABILITY,
-    get_n_candidates,
-    get_n_candidates_lp,
-    get_n_tests_to_generate,
-    get_total_looping_time,
+    TOTAL_LOOPING_TIME_EFFECTIVE,
 )
 from codeflash.code_utils.deduplicate_code import normalize_code
 from codeflash.code_utils.edit_generated_tests import (
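
Note on the import hunk above: the refactor replaces per-call getters (get_n_tests_to_generate() and friends) with *_EFFECTIVE constants that are resolved once when config_consts is imported. As a rough sketch only, assuming the values come from optional environment overrides, the new module could look like this; the CODEFLASH_* variable names and default values are illustrative guesses, not taken from this diff:

# Hypothetical sketch of codeflash/code_utils/config_consts.py after this
# change. Only the *_EFFECTIVE names appear in the diff; the defaults and
# the CODEFLASH_* env-var overrides are assumptions for illustration.
import os

def _int_from_env(name: str, default: int) -> int:
    # Return an integer override from the environment, else the default.
    raw = os.environ.get(name)
    return int(raw) if raw else default

# Resolved once, at import time; call sites read plain names.
N_TESTS_TO_GENERATE_EFFECTIVE = _int_from_env("CODEFLASH_N_TESTS", 2)
N_CANDIDATES_EFFECTIVE = _int_from_env("CODEFLASH_N_CANDIDATES", 10)
N_CANDIDATES_LP_EFFECTIVE = _int_from_env("CODEFLASH_N_CANDIDATES_LP", 6)
TOTAL_LOOPING_TIME_EFFECTIVE = float(os.environ.get("CODEFLASH_LOOPING_TIME", "10.0"))

The trade-off: every call site in the hunks below becomes a plain name lookup instead of a function call, at the cost of fixing each value for the life of the process.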
@@ -237,7 +237,7 @@ def __init__(
         self.generate_and_instrument_tests_results: (
             tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet] | None
         ) = None
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         self.executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=n_tests + 2 if self.experiment_id is None else n_tests + 3
         )
@@ -289,7 +289,7 @@ def generate_and_instrument_tests(
         ]
     ]:
         """Generate and instrument tests, returning all necessary data for optimization."""
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         generated_test_paths = [
             get_test_file_path(
                 self.test_cfg.tests_root, self.function_to_optimize.function_name, test_index, test_type="unit"
@@ -487,7 +487,7 @@ def determine_best_candidate(
                 dependency_code=code_context.read_only_context_code,
                 trace_id=self.function_trace_id[:-4] + exp_type if self.experiment_id else self.function_trace_id,
                 line_profiler_results=original_code_baseline.line_profile_results["str_out"],
-                num_candidates=get_n_candidates_lp(),
+                num_candidates=N_CANDIDATES_LP_EFFECTIVE,
                 experiment_metadata=ExperimentMetadata(
                     id=self.experiment_id, group="control" if exp_type == "EXP0" else "experiment"
                 )
@@ -1061,7 +1061,7 @@ def generate_tests_and_optimizations(
         generated_perf_test_paths: list[Path],
         run_experiment: bool = False,  # noqa: FBT001, FBT002
     ) -> Result[tuple[GeneratedTestsList, dict[str, set[FunctionCalledInTest]], OptimizationSet], str]:
-        n_tests = get_n_tests_to_generate()
+        n_tests = N_TESTS_TO_GENERATE_EFFECTIVE
         assert len(generated_test_paths) == n_tests
         console.rule()
         # Submit the test generation task as future
@@ -1072,7 +1072,7 @@ def generate_tests_and_optimizations(
             generated_test_paths,
             generated_perf_test_paths,
         )
-        n_candidates = get_n_candidates()
+        n_candidates = N_CANDIDATES_EFFECTIVE
         future_optimization_candidates = self.executor.submit(
             self.aiservice_client.optimize_python_code,
             read_writable_code.markdown,
@@ -1482,7 +1482,7 @@ def establish_original_code_baseline(
             instrument_codeflash_capture(
                 self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
             )
-            total_looping_time = get_total_looping_time()
+            total_looping_time = TOTAL_LOOPING_TIME_EFFECTIVE
             behavioral_results, coverage_results = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
@@ -1623,7 +1623,7 @@ def run_optimized_candidate(
                 self.function_to_optimize, file_path_to_helper_classes, self.test_cfg.tests_root
             )
 
-            total_looping_time = get_total_looping_time()
+            total_looping_time = TOTAL_LOOPING_TIME_EFFECTIVE
             candidate_behavior_results, _ = self.run_and_parse_tests(
                 testing_type=TestingMode.BEHAVIOR,
                 test_env=test_env,
@@ -1678,7 +1678,7 @@ def run_optimized_candidate(
             start_time: float = time.time()
             loop_count = 0
             for i in range(100):
-                if i >= 5 and time.time() - start_time >= get_total_looping_time() * 1.5:
+                if i >= 5 and time.time() - start_time >= TOTAL_LOOPING_TIME_EFFECTIVE * 1.5:
                     # * 1.5 to give unittest a bit more time to run
                     break
                 test_env["CODEFLASH_LOOP_INDEX"] = str(i + 1)
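
The hunk above is the benchmarking loop: the first 5 iterations always run, and from the 6th onward the loop exits once elapsed wall time reaches 1.5x the looping-time budget (the 0.5x slack gives unittest extra time, per the inline comment). A self-contained sketch of that shape; run_one_loop and the budget value are stand-ins, not codeflash APIs:

import time

TOTAL_LOOPING_TIME_EFFECTIVE = 10.0  # seconds; assumed value

def run_loops(run_one_loop, max_loops: int = 100, min_loops: int = 5) -> int:
    start_time = time.time()
    loop_count = 0
    for i in range(max_loops):
        # Always run at least min_loops iterations, then stop once elapsed
        # time exceeds 1.5x the budget (slack for unittest overhead).
        if i >= min_loops and time.time() - start_time >= TOTAL_LOOPING_TIME_EFFECTIVE * 1.5:
            break
        run_one_loop(i + 1)  # 1-based loop index, mirroring CODEFLASH_LOOP_INDEX
        loop_count = i + 1
    return loop_count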
@@ -1687,7 +1687,7 @@ def run_optimized_candidate(
                     test_env=test_env,
                     test_files=self.test_files,
                     optimization_iteration=optimization_candidate_index,
-                    testing_time=get_total_looping_time(),
+                    testing_time=TOTAL_LOOPING_TIME_EFFECTIVE,
                     unittest_loop_index=i + 1,
                 )
                 loop_count = i + 1
@@ -1726,7 +1726,7 @@ def run_and_parse_tests(
         test_env: dict[str, str],
         test_files: TestFiles,
         optimization_iteration: int,
-        testing_time: float = get_total_looping_time(),
+        testing_time: float = TOTAL_LOOPING_TIME_EFFECTIVE,
         *,
         enable_coverage: bool = False,
         pytest_min_loops: int = 5,
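
One subtlety in the hunk above: testing_time's old default, get_total_looping_time(), was a default-argument expression, and Python evaluates those once, at def time, so the budget was already frozen at import; the new constant just makes that explicit. A minimal demonstration (illustrative names, not codeflash code):

_budget = 10.0

def current_budget() -> float:
    return _budget

def run(testing_time: float = current_budget()):  # default captured at def time
    return testing_time

_budget = 99.0   # later reconfiguration...
print(run())     # ...still prints 10.0: the default is not re-evaluated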
@@ -1877,7 +1877,7 @@ def line_profiler_step(
             test_env=test_env,
             test_files=self.test_files,
             optimization_iteration=0,
-            testing_time=get_total_looping_time(),
+            testing_time=TOTAL_LOOPING_TIME_EFFECTIVE,
             enable_coverage=False,
             code_context=code_context,
             line_profiler_output_file=line_profiler_output_file,