Commit 0cecb75 (parent a086646)

refactor(gepa): remove verbose logs and consolidate comments

Remove ~25 debug/info logs per maintainer feedback:
- Internal routing/processing logs
- Trace processing details
- Reflective example breakdowns
- Config building verbosity

Consolidate multi-line comments into concise single lines while preserving important context (WHY, not WHAT).

File tree: 2 files changed (+5, -59 lines)

dspy/teleprompt/gepa/gepa_utils.py (0 additions, 25 deletions)
@@ -171,7 +171,6 @@ def propose_component_texts(
         with dspy.context(lm=self.reflection_lm or dspy.settings.lm):
             # Handle regular instruction components
             if instruction_components:
-                logger.debug(f"Routing {len(instruction_components)} instruction components to instruction_proposer")
                 results.update(
                     instruction_proposer(
                         candidate=candidate,
@@ -182,7 +181,6 @@ def propose_component_texts(
 
             # Handle components with tools (ReAct and Tool modules)
             if tool_module_components:
-                logger.debug(f"Routing {len(tool_module_components)} tool_module components to tool_module_proposer")
                 results.update(
                     tool_module_proposer(
                         candidate=candidate,
@@ -212,7 +210,6 @@ def build_program(self, candidate: dict[str, str]):
 
                 config = json.loads(value)
 
-                # Parse module configs and override predictor instructions
                 for pred_name, instruction in config.items():
                     if isinstance(instruction, str):
                         improved_predictors[pred_name] = instruction
@@ -331,12 +328,7 @@ def make_reflective_dataset(
         # collect unique tools from traces for each tool-using predictor, serialize to candidate at end
         tools_by_predictor: dict[str, dict[str, Tool]] = {}
 
-        # Debug: Log what components we're trying to update
-        logger.info(f"make_reflective_dataset called with components_to_update: {components_to_update}")
-
         for pred_name in components_to_update:
-            logger.info(f"Processing component: {pred_name}")
-
             # Extract predictor name from component key
             if pred_name.startswith(REACT_MODULE_PREFIX):
                 target_name = pred_name.removeprefix(f"{REACT_MODULE_PREFIX}:")
@@ -378,14 +370,10 @@ def extract_tools_from_value(value, tools_dict):
                 if hasattr(module_score, "score"):
                     module_score = module_score["score"]
 
-                logger.debug(f" Processing trace with {len(trace)} entries for example: {example}")
                 trace_instances = [t for t in trace if t[0].signature.equals(module.signature)]
-                logger.debug(f" Found {len(trace_instances)} matching trace instances for signature: {module.signature}")
                 if not self.add_format_failure_as_feedback:
                     trace_instances = [t for t in trace_instances if not isinstance(t[2], FailedPrediction)]
-                logger.debug(f" After filtering FailedPrediction: {len(trace_instances)} instances")
                 if len(trace_instances) == 0:
-                    logger.debug(" Skipping example - no matching trace instances")
                     continue
 
                 # Extract tools that are used in the trace instances
@@ -479,23 +467,11 @@ def extract_tools_from_value(value, tools_dict):
 
                 items.append(d)
 
-                # Log exact reflective example that reflection LM will see
-                if pred_name.startswith(REACT_MODULE_PREFIX) and len(items) == 1:
-                    logger.info(f" First reflective example for {pred_name}:")
-                    logger.info(f" Inputs: {list(d['Inputs'].keys())}")
-                    if "trajectory" in d["Inputs"]:
-                        traj = d["Inputs"]["trajectory"]
-                        logger.info(f" Trajectory length: {len(traj)} chars")
-                        logger.info(f" Trajectory sample:\n{traj[:300]}...")
-                    logger.info(f" Outputs: {list(d['Generated Outputs'].keys()) if isinstance(d['Generated Outputs'], dict) else '<string>'}")
-                    logger.info(f" Feedback: {d['Feedback'][:100]}...")
-
             if len(items) == 0:
                 logger.warning(f" No valid reflective examples found for {pred_name}")
                 continue
 
             ret_d[pred_name] = items
-            logger.info(f" Created {len(items)} reflective examples for {pred_name}")
 
         # Update candidate configs with extracted tools (after all traces processed)
         for pred_name, tools_dict in tools_by_predictor.items():
@@ -512,7 +488,6 @@ def extract_tools_from_value(value, tools_dict):
                 for tool_name, tool in tools_dict.items()
             }
             candidate[pred_name] = json.dumps(config, indent=2)
-            logger.info(f"Extracted {len(tools_dict)} tools for {pred_name}: {list(tools_dict.keys())}")
 
         if len(ret_d) == 0:
             raise Exception("No valid predictions found for any module.")
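Note (not part of the diff): the candidate entries written by candidate[pred_name] = json.dumps(config, indent=2) above are JSON module configs whose string values are predictor instructions and whose "tools" entry carries serialized tool metadata ("desc", "args", "arg_desc"), which is the shape build_program and the tool-module proposer read back. A minimal sketch of one such entry follows; the component key, instruction text, and tool fields are illustrative, not taken from the repository.

import json

# Illustrative shape only; "<REACT_MODULE_PREFIX>" stands in for the real prefix constant.
candidate["<REACT_MODULE_PREFIX>:agent"] = json.dumps(
    {
        "react": "Hypothetical primary predictor instruction...",
        "extract": "Hypothetical extract predictor instruction...",
        "tools": {
            "search": {
                "desc": "Search the corpus for relevant passages.",
                "args": {"query": {"type": "string"}},   # per-argument schema is assumed here
                "arg_desc": {"query": "The search query."},
            }
        },
    },
    indent=2,
)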

dspy/teleprompt/gepa/instruction_proposal.py (5 additions, 34 deletions)
@@ -382,20 +382,15 @@ def __call__(
                 logger.warning(f"Skipping {module_key}: not in candidate={module_key not in candidate}, not in reflective_dataset={module_key not in reflective_dataset}")
                 continue
 
-            # Deserialize module config
             current_module_config = json.loads(candidate[module_key])
 
-            # Extract predictor keys (strings are predictor instructions)
-            # Predictor keys are expected to be 1 for tool modules and 2 for ReAct modules (extra extract predictor)
+            # Predictor keys: 1 for tool modules, 2 for ReAct modules (extra extract predictor)
             predictor_keys = [k for k, v in current_module_config.items() if isinstance(v, str)]
-            logger.debug(f"Predictor keys: {predictor_keys}")
             primary_predictor_key = predictor_keys[0]
             extract_predictor_key = predictor_keys[1] if module_key.startswith(REACT_MODULE_PREFIX) else None
 
-            # Reconstruct Tool objects from JSON metadata so the adapter can format them for the reflection LM.
-            # Tool.func cannot be serialized in JSON, so we use a placeholder (never executed).
+            # Reconstruct Tool objects from JSON (func is placeholder since it can't be serialized)
             current_tools_dict = current_module_config.get("tools", {})
-            logger.info(f"Found {len(current_tools_dict)} tools: {list(current_tools_dict.keys())}")
             tools_list = []
             for tool_name, tool_info in current_tools_dict.items():
                 tool = dspy.Tool(
@@ -407,12 +402,9 @@ def __call__(
                 tool.arg_desc = tool_info.get("arg_desc", {})
                 tools_list.append(tool)
 
-            # Build dynamic signature by extending base signature
+            # Build dynamic signature with tool-specific output fields
             signature = GenerateImprovedToolModuleDescriptionsFromFeedback
 
-            logger.debug(f"Building dynamic signature with {len(tools_list)} tools...")
-
-            # Add dynamic tool description and arg descriptions output fields
             for tool in tools_list:
                 tool_name = tool.name
                 tool_info = current_tools_dict[tool_name]
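The loop above attaches one output field per tool to the base signature; the field-creation lines fall outside this hunk. A rough sketch of what that dynamic extension can look like, assuming dspy's Signature.append(name, field, type_) API; only the field names (improved_tool_{name}_desc, read back via getattr in the next hunk) are grounded in the diff:

# Sketch under assumptions, not the committed implementation.
signature = GenerateImprovedToolModuleDescriptionsFromFeedback
for tool in tools_list:
    # One optional output field per tool; None means "keep the original description".
    signature = signature.append(
        f"improved_tool_{tool.name}_desc",
        dspy.OutputField(desc=f"Improved description for tool '{tool.name}', or None to keep it"),
        type_=str | None,
    )
    # Per-argument improved_tool_{name}_arg_{arg}_desc fields would be added the same way.
propose_descriptions = dspy.Predict(signature)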
@@ -457,58 +449,37 @@ def __call__(
 
             result = propose_descriptions(**kwargs)
 
-            # Build improved config from reflection LM suggestions
-            # Reflection LM returns None for components it doesn't want to change, or text for improvements
-            logger.info("Building improved config from reflection LM response...")
+            # Build improved config (reflection LM returns None to keep original, or new text)
             improved_module_config = {}
 
-            # Update primary predictor instruction if reflection LM suggested improvement
             if result.improved_predictor_instruction is not None:
                 improved_module_config[primary_predictor_key] = result.improved_predictor_instruction
-                logger.debug(f"{primary_predictor_key}: {len(result.improved_predictor_instruction)} chars")
-            else:
-                logger.debug(f"{primary_predictor_key}: reflection LM suggests keeping original")
 
-            # Update extract instruction if exists and reflection LM suggested improvement
             if extract_predictor_key is not None and result.improved_extract_instruction is not None:
                 improved_module_config[extract_predictor_key] = result.improved_extract_instruction
-                logger.debug(f"{extract_predictor_key}: {len(result.improved_extract_instruction)} chars")
-            else:
-                logger.debug(f"{extract_predictor_key}: reflection LM suggests keeping original")
 
-            # Update tool descriptions if reflection LM suggested improvements
            improved_module_config["tools"] = {}
             for tool_name, tool_info in current_tools_dict.items():
-                # Check if reflection LM suggested improving this tool's description
                 improved_desc = getattr(result, f"improved_tool_{tool_name}_desc", None)
-
-                # Skip if reflection LM suggests keeping original
                 if improved_desc is None:
-                    logger.debug(f" Tool '{tool_name}': reflection LM suggests keeping original")
                     continue
 
                 improved_tool_info = {
                     "desc": improved_desc,
                     "arg_desc": {}
                 }
 
-                # Update parameter descriptions if reflection LM suggested improvements
                 if tool_info.get("args"):
                     for arg_name in tool_info["args"].keys():
                         field_name = f"improved_tool_{tool_name}_arg_{arg_name}_desc"
                         arg_desc = getattr(result, field_name, None)
-                        if arg_desc is not None: # Reflection LM suggested improvement
+                        if arg_desc is not None:
                             improved_tool_info["arg_desc"][arg_name] = arg_desc
 
                 improved_module_config["tools"][tool_name] = improved_tool_info
-                logger.debug(f" Tool '{tool_name}': desc={len(improved_desc)} chars, params={len(improved_tool_info['arg_desc'])}")
 
-            # Serialize back to JSON
             updated_components[module_key] = json.dumps(improved_module_config, indent=2)
-            logger.info(f"Successfully optimized {module_key}")
-            logger.debug(f"Serialized config length: {len(updated_components[module_key])} chars")
 
-        logger.info(f"\nToolModuleProposer returning {len(updated_components)} components: {list(updated_components.keys())}")
         return updated_components
 
     def _format_examples(self, reflective_dataset: list[ReflectiveExample]) -> str:
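As a quick sanity check of the contract the two changed files share (illustrative only, not part of the commit): every value placed in updated_components must round-trip through json.loads with string predictor instructions and an optional "tools" mapping, since build_program in gepa_utils.py parses each candidate value the same way.

import json

# Hypothetical check: each optimized component is valid JSON in the shape build_program expects.
for module_key, serialized in updated_components.items():
    config = json.loads(serialized)
    assert isinstance(config.get("tools", {}), dict)
    assert all(isinstance(v, str) for k, v in config.items() if k != "tools")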
