     "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
     "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
     "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
-    "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
     "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K,
     "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
 }
@@ -203,8 +202,12 @@ def on_llm_start(
                 if key in all_params and all_params[key] is not None:
                     set_data_normalized(span, attribute, all_params[key], unpack=False)
 
+            _set_tools_on_span(span, all_params.get("tools"))
+
             if should_send_default_pii() and self.include_prompts:
-                set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts)
+                set_data_normalized(
+                    span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False
+                )
 
     def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
@@ -246,14 +249,20 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
                 if key in all_params and all_params[key] is not None:
                     set_data_normalized(span, attribute, all_params[key], unpack=False)
 
+            _set_tools_on_span(span, all_params.get("tools"))
+
             if should_send_default_pii() and self.include_prompts:
+                normalized_messages = []
+                for list_ in messages:
+                    for message in list_:
+                        normalized_messages.append(
+                            self._normalize_langchain_message(message)
+                        )
                 set_data_normalized(
                     span,
                     SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                    [
-                        [self._normalize_langchain_message(x) for x in list_]
-                        for list_ in messages
-                    ],
+                    normalized_messages,
+                    unpack=False,
                 )
 
     def on_chat_model_end(self, response, *, run_id, **kwargs):
@@ -351,9 +360,7 @@ def on_agent_finish(self, finish, *, run_id, **kwargs):
 
             if should_send_default_pii() and self.include_prompts:
                 set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_RESPONSE_TEXT,
-                    finish.return_values.items(),
+                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
                 )
 
             self._exit_span(span_data, run_id)
@@ -473,13 +480,11 @@ def _get_token_usage(obj):
     if usage is not None:
         return usage
 
-    # check for usage in the object itself
     for name in possible_names:
         usage = _get_value(obj, name)
         if usage is not None:
             return usage
 
-    # no usage found anywhere
     return None
 
 
@@ -531,6 +536,87 @@ def _get_request_data(obj, args, kwargs):
     return (agent_name, tools)
 
 
+def _simplify_langchain_tools(tools):
+    # type: (Any) -> Optional[List[Any]]
+    """Parse and simplify tools into a cleaner format."""
+    if not tools:
+        return None
+
+    if not isinstance(tools, (list, tuple)):
+        return None
+
+    simplified_tools = []
+    for tool in tools:
+        try:
+            if isinstance(tool, dict):
+
+                if "function" in tool and isinstance(tool["function"], dict):
+                    func = tool["function"]
+                    simplified_tool = {
+                        "name": func.get("name"),
+                        "description": func.get("description"),
+                    }
+                    if simplified_tool["name"]:
+                        simplified_tools.append(simplified_tool)
+                elif "name" in tool:
+                    simplified_tool = {
+                        "name": tool.get("name"),
+                        "description": tool.get("description"),
+                    }
+                    simplified_tools.append(simplified_tool)
+                else:
+                    name = (
+                        tool.get("name")
+                        or tool.get("tool_name")
+                        or tool.get("function_name")
+                    )
+                    if name:
+                        simplified_tools.append(
+                            {
+                                "name": name,
+                                "description": tool.get("description")
+                                or tool.get("desc"),
+                            }
+                        )
+            elif hasattr(tool, "name"):
+                simplified_tool = {
+                    "name": getattr(tool, "name", None),
+                    "description": getattr(tool, "description", None)
+                    or getattr(tool, "desc", None),
+                }
+                if simplified_tool["name"]:
+                    simplified_tools.append(simplified_tool)
+            elif hasattr(tool, "__name__"):
+                simplified_tools.append(
+                    {
+                        "name": tool.__name__,
+                        "description": getattr(tool, "__doc__", None),
+                    }
+                )
+            else:
+                tool_str = str(tool)
+                if tool_str and tool_str != "":
+                    simplified_tools.append({"name": tool_str, "description": None})
+        except Exception:
+            continue
+
+    return simplified_tools if simplified_tools else None
+
+
+def _set_tools_on_span(span, tools):
+    # type: (Span, Any) -> None
+    """Set available tools data on a span if tools are provided."""
+    if tools is not None:
+        simplified_tools = _simplify_langchain_tools(tools)
+        if simplified_tools:
+            set_data_normalized(
+                span,
+                SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
+                simplified_tools,
+                unpack=False,
+            )
+
+
 def _wrap_configure(f):
     # type: (Callable[..., Any]) -> Callable[..., Any]
 
@@ -601,7 +687,7 @@ def new_configure(
             ]
         elif isinstance(local_callbacks, BaseCallbackHandler):
             local_callbacks = [local_callbacks, sentry_handler]
-        else:  # local_callbacks is a list
+        else:
             local_callbacks = [*local_callbacks, sentry_handler]
 
         return f(
@@ -638,10 +724,7 @@ def new_invoke(self, *args, **kwargs):
             span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
             span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
 
-            if tools:
-                set_data_normalized(
-                    span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-                )
+            _set_tools_on_span(span, tools)
 
             # Run the agent
             result = f(self, *args, **kwargs)
@@ -653,11 +736,7 @@ def new_invoke(self, *args, **kwargs):
                 and integration.include_prompts
             ):
                 set_data_normalized(
-                    span,
-                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                    [
-                        input,
-                    ],
+                    span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
                 )
 
             output = result.get("output")
@@ -666,7 +745,7 @@ def new_invoke(self, *args, **kwargs):
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             return result
 
@@ -698,10 +777,7 @@ def new_stream(self, *args, **kwargs):
         span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
         span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
 
-        if tools:
-            set_data_normalized(
-                span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False
-            )
+        _set_tools_on_span(span, tools)
 
         input = args[0].get("input") if len(args) >= 1 else None
         if (
@@ -710,11 +786,7 @@ def new_stream(self, *args, **kwargs):
             and integration.include_prompts
         ):
             set_data_normalized(
-                span,
-                SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                [
-                    input,
-                ],
+                span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False
             )
 
         # Run the agent
@@ -737,7 +809,7 @@ def new_iterator():
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             span.__exit__(None, None, None)
 
@@ -756,7 +828,7 @@ async def new_iterator_async():
                 and should_send_default_pii()
                 and integration.include_prompts
             ):
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
 
             span.__exit__(None, None, None)
 
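
Illustrative sketch (not part of the commit): assuming the _simplify_langchain_tools helper added above is in scope, a hypothetical tool list would be reduced to name/description pairs before being attached to the span as gen_ai.request.available_tools, e.g.:

    # Hypothetical input; the tool names below are examples, not taken from the commit.
    tools = [
        {"type": "function", "function": {"name": "get_weather", "description": "Look up the weather"}},
        {"name": "search", "description": "Web search"},
    ]
    _simplify_langchain_tools(tools)
    # -> [{"name": "get_weather", "description": "Look up the weather"},
    #     {"name": "search", "description": "Web search"}]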