 )
 from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes
 from testing_support.ml_testing_utils import (
-    add_token_count_to_events,
+    add_token_count_streaming_events,
+    add_token_counts_to_chat_events,
     disabled_ai_monitoring_record_content_settings,
     disabled_ai_monitoring_settings,
     disabled_ai_monitoring_streaming_settings,
@@ -206,7 +207,7 @@ def _test():
 @reset_core_stats_engine()
 @override_llm_token_callback_settings(llm_token_count_callback)
 def test_bedrock_chat_completion_with_token_count(set_trace_info, exercise_model, expected_events, expected_metrics):
-    @validate_custom_events(add_token_count_to_events(expected_events))
+    @validate_custom_events(add_token_counts_to_chat_events(add_token_count_streaming_events(expected_events)))
     # One summary event, one user message, and one response message from the assistant
     @validate_custom_event_count(count=3)
     @validate_transaction_metrics(
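
The replacement decorator nests the two new fixups, applying the streaming fixup before the chat-event fixup. Their real implementations live in testing_support.ml_testing_utils and are not part of this diff; the snippet below is only a hedged sketch of the composition pattern, assuming a (intrinsics, attributes) event structure, a "token_count" attribute name, and a placeholder count.

import copy

# Hedged sketch only -- not the testing_support.ml_testing_utils implementation.
# Each fixup takes the expected-event list and returns a copy with token counts
# added, which is what lets the two helpers be nested inside the decorator.
def _token_count_fixup_sketch(expected_events, token_count=105):
    events = copy.deepcopy(expected_events)
    for _intrinsics, attributes in events:
        attributes["token_count"] = token_count  # assumed attribute name and placeholder value
    return events

# Mirrors the decorator above: the streaming fixup runs first, then the chat-event fixup.
# expected = add_token_counts_to_chat_events(add_token_count_streaming_events(expected_events))
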
@@ -455,51 +456,6 @@ def _test():
     _test()


-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-def test_bedrock_chat_completion_error_incorrect_access_key_with_token(
-    monkeypatch,
-    bedrock_server,
-    exercise_model,
-    set_trace_info,
-    expected_invalid_access_key_error_events,
-    expected_metrics,
-):
-    @validate_custom_events(add_token_count_to_events(expected_invalid_access_key_error_events))
-    @validate_error_trace_attributes(
-        _client_error_name,
-        exact_attrs={
-            "agent": {},
-            "intrinsic": {},
-            "user": {
-                "http.statusCode": 403,
-                "error.message": "The security token included in the request is invalid.",
-                "error.code": "UnrecognizedClientException",
-            },
-        },
-    )
-    @validate_transaction_metrics(
-        name="test_bedrock_chat_completion",
-        scoped_metrics=expected_metrics,
-        rollup_metrics=expected_metrics,
-        custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)],
-        background_task=True,
-    )
-    @background_task(name="test_bedrock_chat_completion")
-    def _test():
-        monkeypatch.setattr(bedrock_server._request_signer._credentials, "access_key", "INVALID-ACCESS-KEY")
-
-        with pytest.raises(_client_error):  # not sure where this exception actually comes from
-            set_trace_info()
-            add_custom_attribute("llm.conversation_id", "my-awesome-id")
-            add_custom_attribute("llm.foo", "bar")
-            add_custom_attribute("non_llm_attr", "python-agent")
-
-            exercise_model(prompt="Invalid Token", temperature=0.7, max_tokens=100)
-
-    _test()
-
-
 def invoke_model_malformed_request_body(loop, bedrock_server, response_streaming):
     async def _coro():
         with pytest.raises(_client_error):
@@ -798,58 +754,6 @@ async def _test():
     loop.run_until_complete(_test())


-@reset_core_stats_engine()
-@override_llm_token_callback_settings(llm_token_count_callback)
-@validate_custom_events(add_token_count_to_events(chat_completion_expected_streaming_error_events))
-@validate_custom_event_count(count=2)
-@validate_error_trace_attributes(
-    _event_stream_error_name,
-    exact_attrs={
-        "agent": {},
-        "intrinsic": {},
-        "user": {
-            "error.message": "Malformed input request, please reformat your input and try again.",
-            "error.code": "ValidationException",
-        },
-    },
-    forgone_params={"agent": (), "intrinsic": (), "user": ("http.statusCode")},
-)
-@validate_transaction_metrics(
-    name="test_bedrock_chat_completion",
-    scoped_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)],
-    rollup_metrics=[("Llm/completion/Bedrock/invoke_model_with_response_stream", 1)],
-    custom_metrics=[(f"Supportability/Python/ML/Bedrock/{BOTOCORE_VERSION}", 1)],
-    background_task=True,
-)
-@background_task(name="test_bedrock_chat_completion")
-def test_bedrock_chat_completion_error_streaming_exception_with_token_count(loop, bedrock_server, set_trace_info):
-    """
-    Duplicate of test_bedrock_chat_completion_error_streaming_exception, but with token callback being set.
-
-    See the original test for a description of the error case.
-    """
-
-    async def _test():
-        with pytest.raises(_event_stream_error):
-            model = "amazon.titan-text-express-v1"
-            body = (chat_completion_payload_templates[model] % ("Streaming Exception", 0.7, 100)).encode("utf-8")
-
-            set_trace_info()
-            add_custom_attribute("llm.conversation_id", "my-awesome-id")
-            add_custom_attribute("llm.foo", "bar")
-            add_custom_attribute("non_llm_attr", "python-agent")
-
-            response = await bedrock_server.invoke_model_with_response_stream(
-                body=body, modelId=model, accept="application/json", contentType="application/json"
-            )
-
-            body = response.get("body")
-            async for resp in body:
-                assert resp
-
-    loop.run_until_complete(_test())
-
-
 def test_bedrock_chat_completion_functions_marked_as_wrapped_for_sdk_compatibility(bedrock_server):
     assert bedrock_server._nr_wrapped
