 2. Method-style for direct tool access: `agent.tool.tool_name(param1="value")`
 """
 
+import asyncio
 import json
 import logging
 import os
 import random
 from concurrent.futures import ThreadPoolExecutor
-from typing import Any, AsyncIterator, Callable, Generator, List, Mapping, Optional, Type, TypeVar, Union, cast
+from typing import Any, AsyncGenerator, AsyncIterator, Callable, Mapping, Optional, Type, TypeVar, Union, cast
 
 from opentelemetry import trace
 from pydantic import BaseModel
@@ -378,33 +379,43 @@ def __call__(self, prompt: str, **kwargs: Any) -> AgentResult:
                 - metrics: Performance metrics from the event loop
                 - state: The final state of the event loop
         """
-        callback_handler = kwargs.get("callback_handler", self.callback_handler)
 
-        self._start_agent_trace_span(prompt)
+        def execute() -> AgentResult:
+            return asyncio.run(self.invoke_async(prompt, **kwargs))
 
-        try:
-            events = self._run_loop(prompt, kwargs)
-            for event in events:
-                if "callback" in event:
-                    callback_handler(**event["callback"])
+        with ThreadPoolExecutor() as executor:
+            future = executor.submit(execute)
+            return future.result()
 
-            stop_reason, message, metrics, state = event["stop"]
-            result = AgentResult(stop_reason, message, metrics, state)
+    async def invoke_async(self, prompt: str, **kwargs: Any) -> AgentResult:
+        """Process a natural language prompt through the agent's event loop.
 
-            self._end_agent_trace_span(response=result)
+        This method implements the conversational interface (e.g., `agent("hello!")`). It adds the user's prompt to
+        the conversation history, processes it through the model, executes any tool calls, and returns the final result.
 
-            return result
+        Args:
+            prompt: The natural language prompt from the user.
+            **kwargs: Additional parameters to pass through the event loop.
 
-        except Exception as e:
-            self._end_agent_trace_span(error=e)
-            raise
+        Returns:
+            Result object containing:
+
+                - stop_reason: Why the event loop stopped (e.g., "end_turn", "max_tokens")
+                - message: The final message from the model
+                - metrics: Performance metrics from the event loop
+                - state: The final state of the event loop
+        """
+        events = self.stream_async(prompt, **kwargs)
+        async for event in events:
+            _ = event
+
+        return cast(AgentResult, event["result"])
 
     def structured_output(self, output_model: Type[T], prompt: Optional[str] = None) -> T:
         """This method allows you to get structured output from the agent.
 
         If you pass in a prompt, it will be added to the conversation history and the agent will respond to it.
         If you don't pass in a prompt, it will use only the conversation history to respond.
-        If no conversation history exists and no prompt is provided, an error will be raised.
 
         For smaller models, you may want to use the optional prompt string to add additional instructions to explicitly
         instruct the model to output the structured data.
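
In the hunk above, `__call__` no longer drives the event loop itself; it delegates to `invoke_async` and runs the coroutine to completion with `asyncio.run` on a `ThreadPoolExecutor` worker thread. Running it on a fresh thread keeps the synchronous call signature usable even when the caller's own thread already has a running event loop. A minimal sketch of that sync-over-async wrapper pattern (the class and method names below are illustrative, not the SDK's API):

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor


class Example:
    async def do_work_async(self, prompt: str) -> str:
        await asyncio.sleep(0)  # stand-in for model/tool I/O
        return f"echo: {prompt}"

    def do_work(self, prompt: str) -> str:
        # asyncio.run() raises if the current thread already runs an event loop,
        # so the coroutine is driven on a new loop in a worker thread instead.
        def execute() -> str:
            return asyncio.run(self.do_work_async(prompt))

        with ThreadPoolExecutor() as executor:
            return executor.submit(execute).result()


print(Example().do_work("hello"))  # callable from plain synchronous code
```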
@@ -413,25 +424,52 @@ def structured_output(self, output_model: Type[T], prompt: Optional[str] = None)
             output_model: The output model (a JSON schema written as a Pydantic BaseModel)
                 that the agent will use when responding.
             prompt: The prompt to use for the agent.
+
+        Raises:
+            ValueError: If no conversation history or prompt is provided.
+        """
+
+        def execute() -> T:
+            return asyncio.run(self.structured_output_async(output_model, prompt))
+
+        with ThreadPoolExecutor() as executor:
+            future = executor.submit(execute)
+            return future.result()
+
+    async def structured_output_async(self, output_model: Type[T], prompt: Optional[str] = None) -> T:
+        """This method allows you to get structured output from the agent.
+
+        If you pass in a prompt, it will be added to the conversation history and the agent will respond to it.
+        If you don't pass in a prompt, it will use only the conversation history to respond.
+
+        For smaller models, you may want to use the optional prompt string to add additional instructions to explicitly
+        instruct the model to output the structured data.
+
+        Args:
+            output_model: The output model (a JSON schema written as a Pydantic BaseModel)
+                that the agent will use when responding.
+            prompt: The prompt to use for the agent.
+
+        Raises:
+            ValueError: If no conversation history or prompt is provided.
         """
         self._hooks.invoke_callbacks(StartRequestEvent(agent=self))
 
         try:
-            messages = self.messages
-            if not messages and not prompt:
+            if not self.messages and not prompt:
                 raise ValueError("No conversation history or prompt provided")
 
             # add the prompt as the last message
             if prompt:
-                messages.append({"role": "user", "content": [{"text": prompt}]})
+                self.messages.append({"role": "user", "content": [{"text": prompt}]})
 
-            # get the structured output from the model
-            events = self.model.structured_output(output_model, messages)
-            for event in events:
+            events = self.model.structured_output(output_model, self.messages)
+            async for event in events:
                 if "callback" in event:
                     self.callback_handler(**cast(dict, event["callback"]))
 
             return event["output"]
+
         finally:
             self._hooks.invoke_callbacks(EndRequestEvent(agent=self))
 
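
Both variants take a Pydantic `BaseModel` as the schema plus an optional prompt, and raise `ValueError` when neither a prompt nor prior conversation history is available. A usage sketch (the import path, constructor arguments, and model fields here are assumptions for illustration):

```python
import asyncio

from pydantic import BaseModel

from strands import Agent  # assumed import path for the Agent class in this diff


class WeatherReport(BaseModel):
    """Hypothetical schema the model is asked to fill in."""

    city: str
    temperature_c: float


agent = Agent()

# Synchronous wrapper: blocks until the parsed Pydantic object is available.
report = agent.structured_output(WeatherReport, "What's the weather in Paris?")

# Async variant: normally awaited from async code; asyncio.run() drives it here.
report = asyncio.run(
    agent.structured_output_async(WeatherReport, "What's the weather in Paris?")
)
print(report.city, report.temperature_c)
```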
@@ -471,21 +509,22 @@ async def stream_async(self, prompt: str, **kwargs: Any) -> AsyncIterator[Any]:
 
         try:
             events = self._run_loop(prompt, kwargs)
-            for event in events:
+            async for event in events:
                 if "callback" in event:
                     callback_handler(**event["callback"])
                     yield event["callback"]
 
-            stop_reason, message, metrics, state = event["stop"]
-            result = AgentResult(stop_reason, message, metrics, state)
+            result = AgentResult(*event["stop"])
+            callback_handler(result=result)
+            yield {"result": result}
 
             self._end_agent_trace_span(response=result)
 
         except Exception as e:
             self._end_agent_trace_span(error=e)
             raise
 
-    def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
+    async def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> AsyncGenerator[dict[str, Any], None]:
         """Execute the agent's event loop with the given prompt and parameters."""
         self._hooks.invoke_callbacks(StartRequestEvent(agent=self))
 
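
With this change, `stream_async` finishes by yielding a `{"result": AgentResult(...)}` event after the callback payloads, which is exactly what `invoke_async` reads off the last iteration. A small consumption sketch (the import path, constructor, and prompt are placeholders):

```python
import asyncio

from strands import Agent  # assumed import path for the Agent class in this diff


async def main() -> None:
    agent = Agent()

    last_event = None
    async for event in agent.stream_async("Summarize today's tasks"):
        last_event = event  # intermediate events are the callback payloads

    # The final event wraps the AgentResult, mirroring how invoke_async reads it.
    result = last_event["result"]
    print(result.message)


asyncio.run(main())
```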
@@ -499,13 +538,15 @@ def _run_loop(self, prompt: str, kwargs: dict[str, Any]) -> Generator[dict[str,
             self.messages.append(new_message)
 
             # Execute the event loop cycle with retry logic for context limits
-            yield from self._execute_event_loop_cycle(kwargs)
+            events = self._execute_event_loop_cycle(kwargs)
+            async for event in events:
+                yield event
 
         finally:
             self.conversation_manager.apply_management(self)
             self._hooks.invoke_callbacks(EndRequestEvent(agent=self))
 
-    def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
+    async def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> AsyncGenerator[dict[str, Any], None]:
         """Execute the event loop cycle with retry logic for context window limits.
 
         This internal method handles the execution of the event loop cycle and implements
@@ -520,7 +561,7 @@ def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[st
 
         try:
             # Execute the main event loop cycle
-            yield from event_loop_cycle(
+            events = event_loop_cycle(
                 model=self.model,
                 system_prompt=self.system_prompt,
                 messages=self.messages,  # will be modified by event_loop_cycle
@@ -531,11 +572,15 @@ def _execute_event_loop_cycle(self, kwargs: dict[str, Any]) -> Generator[dict[st
                 event_loop_parent_span=self.trace_span,
                 kwargs=kwargs,
             )
+            async for event in events:
+                yield event
 
         except ContextWindowOverflowException as e:
             # Try reducing the context size and retrying
             self.conversation_manager.reduce_context(self, e=e)
-            yield from self._execute_event_loop_cycle(kwargs)
+            events = self._execute_event_loop_cycle(kwargs)
+            async for event in events:
+                yield event
 
     def _record_tool_execution(
         self,
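
Because `yield from` is a syntax error inside `async def` generators, both the normal path and the `ContextWindowOverflowException` retry path above re-emit events with an explicit `async for ... yield` loop. A stripped-down sketch of that delegation-with-retry shape (the exception class and inner generator are stand-ins, not the SDK's):

```python
import asyncio
from typing import Any, AsyncGenerator


class ContextTooLarge(Exception):
    """Stand-in for a context-window overflow error."""


async def inner_cycle(attempt: int) -> AsyncGenerator[dict[str, Any], None]:
    # Stand-in for event_loop_cycle: fails once, succeeds on the retry.
    if attempt == 0:
        yield {"callback": {"data": "partial"}}
        raise ContextTooLarge()
    yield {"stop": ("end_turn", {}, {}, {})}


async def run_cycle(attempt: int = 0) -> AsyncGenerator[dict[str, Any], None]:
    try:
        # `yield from` is not allowed here, so events are re-yielded explicitly.
        async for event in inner_cycle(attempt):
            yield event
    except ContextTooLarge:
        # Retry by recursing, mirroring the reduce-context-and-retry path above.
        async for event in run_cycle(attempt + 1):
            yield event


async def main() -> None:
    async for event in run_cycle():
        print(event)


asyncio.run(main())
```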
@@ -560,7 +605,7 @@ def _record_tool_execution(
 
             messages: The message history to append to.
         """
         # Create user message describing the tool call
-        user_msg_content: List[ContentBlock] = [
+        user_msg_content: list[ContentBlock] = [
             {"text": (f"agent.tool.{tool['name']} direct tool call.\nInput parameters: {json.dumps(tool['input'])}\n")}
         ]