Commit 4f05fb5

Neehar Duvvuri authored and committed
run black
1 parent 29d44b3 commit 4f05fb5
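
The commit message refers to the black code formatter, which rewrites Python source into a canonical style. As a rough illustration (not part of this commit), black's Python API can reproduce the kind of rewrite seen in the hunks below; the 120-column line length is an assumption, since the joined lines in this diff exceed black's default limit of 88:

# Sketch only: assumes black is installed; line_length=120 is a guess at the repo's setting.
import black

src = (
    'x = any(col.endswith(f"{m}_prompt_tokens") or col.endswith(f"{m}_completion_tokens")'
    ' or col.endswith(f"{m}_total_tokens") for m in all_known_metrics)\n'
)
# black splits the over-long call so each condition sits on its own line,
# matching the first hunk below.
print(black.format_str(src, mode=black.Mode(line_length=120)))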

File tree

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py
sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py
sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_legacy/prompty/_utils.py
sdk/evaluation/azure-ai-evaluation/tests/e2etests/test_prompty_async.py

4 files changed: +21 -18 lines changed

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluate/_evaluate.py

Lines changed: 6 additions & 2 deletions
@@ -318,8 +318,12 @@ def _get_token_count_columns_to_exclude(df: pd.DataFrame) -> List[str]:
         col
         for col in df.columns
         if (
-            any(col.endswith(f"{metric}_prompt_tokens") or col.endswith(f"{metric}_completion_tokens") or col.endswith(f"{metric}_total_tokens")
-                for metric in all_known_metrics)
+            any(
+                col.endswith(f"{metric}_prompt_tokens")
+                or col.endswith(f"{metric}_completion_tokens")
+                or col.endswith(f"{metric}_total_tokens")
+                for metric in all_known_metrics
+            )
         )
     ]
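
For context on what this comprehension computes, here is a self-contained sketch; the metric names and column labels below are hypothetical, not taken from this commit:

# Hypothetical sketch of the column filter; metric and column names are invented.
from typing import List

import pandas as pd

all_known_metrics = ["relevance", "fluency"]  # stand-in for the real metric list

def get_token_count_columns_to_exclude(df: pd.DataFrame) -> List[str]:
    # Collect every column that carries a per-metric token count.
    return [
        col
        for col in df.columns
        if any(
            col.endswith(f"{metric}_prompt_tokens")
            or col.endswith(f"{metric}_completion_tokens")
            or col.endswith(f"{metric}_total_tokens")
            for metric in all_known_metrics
        )
    ]

df = pd.DataFrame(columns=["outputs.relevance_score", "outputs.relevance_total_tokens"])
print(get_token_count_columns_to_exclude(df))  # ['outputs.relevance_total_tokens']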

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_common/_base_prompty_eval.py

Lines changed: 1 addition & 3 deletions
@@ -133,9 +133,7 @@ async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]: # t
                 target=ErrorTarget.CONVERSATION,
             )
         # Call the prompty flow to get the evaluation result.
-        prompty_output_dict = await self._flow(
-            timeout=self._LLM_CALL_TIMEOUT, **eval_input
-        )
+        prompty_output_dict = await self._flow(timeout=self._LLM_CALL_TIMEOUT, **eval_input)

         score = math.nan
         if prompty_output_dict:

sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_legacy/prompty/_utils.py

Lines changed: 12 additions & 7 deletions
@@ -534,12 +534,12 @@ async def format_stream(llm_response: AsyncStream[ChatCompletionChunk]) -> Async
             "finish_reason": "",
             "model_id": "",
             "sample_input": "",
-            "sample_output": ""
+            "sample_output": "",
         }

         if not is_first_choice:
             to_ret["llm_output"] = response
-            return to_ret # we don't actually use this code path since streaming is not used, so set token counts to 0
+            return to_ret  # we don't actually use this code path since streaming is not used, so set token counts to 0

     is_json_format = isinstance(response_format, dict) and response_format.get("type") == "json_object"
     if isinstance(response, AsyncStream):
@@ -555,22 +555,26 @@ async def format_stream(llm_response: AsyncStream[ChatCompletionChunk]) -> Async
             response.usage.completion_tokens if response.usage and response.usage.completion_tokens else 0
         )
         total_token_count = response.usage.total_tokens if response.usage and response.usage.total_tokens else 0
-        finish_reason = response.choices[0].finish_reason if response.choices and response.choices[0].finish_reason else ""
+        finish_reason = (
+            response.choices[0].finish_reason if response.choices and response.choices[0].finish_reason else ""
+        )
         model_id = response.model if response.model else ""
-        sample_output_list = [{"role": response.choices[0].message.role, "content": response.choices[0].message.content}] if (response.choices and response.choices[0].message.content
-            and response.choices[0].message.role) else []
+        sample_output_list = (
+            [{"role": response.choices[0].message.role, "content": response.choices[0].message.content}]
+            if (response.choices and response.choices[0].message.content and response.choices[0].message.role)
+            else []
+        )
         sample_output = json.dumps(sample_output_list)
         input_str = f"{json.dumps(inputs)}" if inputs else ""
         if inputs and len(inputs) > 0:
-            sample_input_json = []
+            sample_input_json = []
             msg = ChatCompletionUserMessageParam(
                 role="user",
                 content=input_str,
             )
             sample_input_json.append(msg)
             sample_input = json.dumps(sample_input_json)

-
         # When calling function/tool, function_call/tool_call response will be returned as a field in message,
         # so we need return message directly. Otherwise, we only return content.
         # https://platform.openai.com/docs/api-reference/chat/object#chat/object-choices
@@ -589,6 +593,7 @@ async def format_stream(llm_response: AsyncStream[ChatCompletionChunk]) -> Async
         to_ret["sample_output"] = sample_output
         return to_ret

+
 def openai_error_retryable(
     error: OpenAIError, retry: int, entity_retry: List[int], max_entity_retries: int
 ) -> Tuple[bool, float]:
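
The two reformatted expressions above guard against missing fields on the response. A minimal sketch of that behavior with stand-in objects (the Msg/Choice/Resp classes below are hypothetical, not the real openai types):

import json
from dataclasses import dataclass, field
from typing import List, Optional

# Hypothetical stand-ins that model only the attributes the guards touch.
@dataclass
class Msg:
    role: Optional[str] = None
    content: Optional[str] = None

@dataclass
class Choice:
    finish_reason: Optional[str] = None
    message: Msg = field(default_factory=Msg)

@dataclass
class Resp:
    choices: List[Choice] = field(default_factory=list)

resp = Resp(choices=[Choice(finish_reason="stop", message=Msg(role="assistant", content="Paris"))])

# Same guard logic as the reformatted code: fall back to "" / [] when data is absent.
finish_reason = resp.choices[0].finish_reason if resp.choices and resp.choices[0].finish_reason else ""
sample_output_list = (
    [{"role": resp.choices[0].message.role, "content": resp.choices[0].message.content}]
    if (resp.choices and resp.choices[0].message.content and resp.choices[0].message.role)
    else []
)
print(finish_reason)                   # stop
print(json.dumps(sample_output_list))  # [{"role": "assistant", "content": "Paris"}]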

sdk/evaluation/azure-ai-evaluation/tests/e2etests/test_prompty_async.py

Lines changed: 2 additions & 6 deletions
@@ -184,9 +184,7 @@ async def test_first_match_text_json_missing(self, prompty_config: Dict[str, Any
     async def test_first_match_text_json_streaming(self, prompty_config: Dict[str, Any]):
         prompty_config["model"]["parameters"]["stream"] = True
         prompty = AsyncPrompty(JSON_PROMPTY, **prompty_config)
-        result = await prompty(
-            question="What is the capital of France?", firstName="Barbra", lastName="Streisand"
-        )
+        result = await prompty(question="What is the capital of France?", firstName="Barbra", lastName="Streisand")
         assert isinstance(result, dict)
         llm_output = result["llm_output"]
         assert isinstance(llm_output, Mapping)
@@ -198,9 +196,7 @@ async def test_first_match_text_json_streaming(self, prompty_config: Dict[str, A
     async def test_full_text(self, prompty_config: Dict[str, Any]):
         prompty_config["model"]["response"] = "full"
         prompty = AsyncPrompty(BASIC_PROMPTY, **prompty_config)
-        result = await prompty(
-            question="What is the capital of France?", firstName="Barbra", lastName="Streisand"
-        )
+        result = await prompty(question="What is the capital of France?", firstName="Barbra", lastName="Streisand")
         assert isinstance(result, dict)
         llm_output = result["llm_output"]
         assert isinstance(llm_output, ChatCompletion)
