Skip to content

Commit 2f3d9f1

Browse files
committed
Resolving some of Preetam's comments by improving documentation / resolving typos
1 parent 9b9c25a commit 2f3d9f1

File tree

2 files changed

+17
-19
lines changed

2 files changed

+17
-19
lines changed

aimon/reprompting_api/pipeline.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -92,15 +92,18 @@ def run(self, system_prompt: str, context: str, user_query: str, user_instructio
9292
Process:
9393
1. Build an initial prompt with query, context, and instructions.
9494
2. Call the LLM to generate a response.
95-
3. Evaluate the response with AIMon detectors.
95+
3. Evaluate the response with AIMon detectors for instruction adherence, toxicity, and groundedness.
96+
Toxicity and groundedness are always evaluated. If user_instructions are empty / not provided, the
97+
instruction adherence detector is not used.
9698
4. If violations are found, iteratively generate corrective prompts and re-prompt the LLM.
97-
5. Stop when all instructions are followed or iteration limits are reached.
99+
5. Stop when all instructions are followed and the response has no hallucination or toxicity, or when iteration or latency limits are reached.
98100
6. Return the best response (lowest residual error) along with telemetry and a summary if configured.
99101
100102
Args:
101-
user_query (str): The user's query or instruction.
102-
context (str): Contextual information to include in the prompt.
103-
user_instructions (list[str]): Instructions the model must follow.
103+
user_query (str): Must be a non-empty string. The user's query or instruction.
104+
context (str): Contextual information to include in the prompt. May be an empty string, but including it is recommended.
105+
user_instructions (list[str]): Instructions the model must follow. May be an empty list, but providing it is highly recommended.
106+
system_prompt (str): A high‑level role or behavior definition for the model. May be an empty string.
104107
105108
Returns:
106109
dict:
@@ -130,7 +133,7 @@ def run(self, system_prompt: str, context: str, user_query: str, user_instructio
130133
curr_result = self._detect_aimon_response(curr_payload, self.config.feedback_model_max_retries)
131134
logger.debug(f"AIMon evaluation result: {curr_result}")
132135

133-
# Get scores and detailed feedback on failured instructions
136+
# Get scores and detailed feedback on failed instructions
134137
scores, feedback = self.get_response_feedback(curr_result)
135138
self._record_iteration_output(iteration_outputs, iteration_num, curr_generated_text, curr_result)
136139

@@ -158,6 +161,7 @@ def run(self, system_prompt: str, context: str, user_query: str, user_instructio
158161
curr_prompt = self._build_corrective_prompt(curr_payload, curr_result)
159162

160163
# Retry LLM call with corrective prompt
164+
curr_generated_text = self._call_llm(curr_prompt, self.config.user_model_max_retries)
161165
curr_generated_text = self._call_llm(curr_prompt,self.config.user_model_max_retries, system_prompt, context, user_query)
162166
# Re-evaluate the new response
163167
curr_payload = self._build_aimon_payload(context, user_query, user_instructions, curr_generated_text, system_prompt)

aimon/reprompting_api/runner.py

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -31,19 +31,13 @@ def run_reprompting_pipeline(
3131
and `"[no context provided]"`) to ensure template consistency.
3232
3333
Args:
34-
llm_fn (Callable[[Template, str, str, str], str]):
35-
A function to call the LLM. Must accept a prompt template (recommended_prompt_template),
36-
`system_prompt`, `context`, and `user_query`.
37-
user_query (str):
38-
The user’s query. Must be a non-empty string.
39-
system_prompt (str, optional):
40-
A system-level instruction string. Defaults to `"[no system prompt provided]"` if None or empty.
41-
context (str, optional):
42-
Supplemental context for the LLM. Defaults to `"[no context provided]"` if None or empty.
43-
user_instructions (List[str], optional):
44-
A list of instructions for the model to follow. Defaults to an empty list.
45-
reprompting_config (RepromptingConfig, optional):
46-
Configuration object for controlling pipeline behavior.
34+
llm_fn (Callable[[Template, str, str, str], str]): A function to call the LLM. Must accept a prompt template (recommended_prompt_template),
35+
`system_prompt`, `context`, and `user_query`.
36+
user_query (str): The user’s query. Must be a non-empty string.
37+
system_prompt (str, optional): A system-level instruction string. Defaults to `"[no system prompt provided]"` if None or empty.
38+
context (str, optional): Supplemental context for the LLM. Defaults to `"[no context provided]"` if None or empty.
39+
user_instructions (List[str], optional): A list of instructions for the model to follow. Defaults to an empty list.
40+
reprompting_config (RepromptingConfig, optional): Configuration object for controlling pipeline behavior.
4741
4842
Returns:
4943
dict: A structured dictionary containing:

0 commit comments

Comments
 (0)