Commit 60f73c3

Merge branch 'main' into refactor/callback-pipeline
Resolved conflict in functions.py by accepting main's version. Our branch does not modify functions.py.
2 parents: 14cac6e + e0e5384

File tree

5 files changed: +207 -108 lines changed


pyproject.toml

Lines changed: 47 additions & 49 deletions
@@ -25,43 +25,43 @@ classifiers = [ # List of https://pypi.org/classifiers/
 ]
 dependencies = [
   # go/keep-sorted start
-  "PyYAML>=6.0.2, <7.0.0", # For APIHubToolset.
-  "absolufy-imports>=0.3.1, <1.0.0", # For Agent Engine deployment.
-  "anyio>=4.9.0, <5.0.0;python_version>='3.10'", # For MCP Session Manager
-  "authlib>=1.5.1, <2.0.0", # For RestAPI Tool
-  "click>=8.1.8, <9.0.0", # For CLI tools
-  "fastapi>=0.115.0, <1.0.0", # FastAPI framework
-  "google-api-python-client>=2.157.0, <3.0.0", # Google API client discovery
-  "google-cloud-aiplatform[agent_engines]>=1.121.0, <2.0.0",# For VertexAI integrations, e.g. example store.
-  "google-cloud-bigtable>=2.32.0", # For Bigtable database
-  "google-cloud-discoveryengine>=0.13.12, <0.14.0", # For Discovery Engine Search Tool
-  "google-cloud-secret-manager>=2.22.0, <3.0.0", # Fetching secrets in RestAPI Tool
-  "google-cloud-spanner>=3.56.0, <4.0.0", # For Spanner database
-  "google-cloud-speech>=2.30.0, <3.0.0", # For Audio Transcription
-  "google-cloud-storage>=2.18.0, <3.0.0", # For GCS Artifact service
-  "google-genai>=1.41.0, <2.0.0", # Google GenAI SDK
-  "graphviz>=0.20.2, <1.0.0", # Graphviz for graph rendering
-  "mcp>=1.8.0, <2.0.0;python_version>='3.10'", # For MCP Toolset
-  "opentelemetry-api>=1.37.0, <=1.37.0", # OpenTelemetry - limit upper version for sdk and api to not risk breaking changes from unstable _logs package.
+  "PyYAML>=6.0.2, <7.0.0", # For APIHubToolset.
+  "absolufy-imports>=0.3.1, <1.0.0", # For Agent Engine deployment.
+  "anyio>=4.9.0, <5.0.0;python_version>='3.10'", # For MCP Session Manager
+  "authlib>=1.5.1, <2.0.0", # For RestAPI Tool
+  "click>=8.1.8, <9.0.0", # For CLI tools
+  "fastapi>=0.115.0, <1.119.0", # FastAPI framework
+  "google-api-python-client>=2.157.0, <3.0.0", # Google API client discovery
+  "google-cloud-aiplatform[agent_engines]>=1.121.0, <2.0.0", # For VertexAI integrations, e.g. example store.
+  "google-cloud-bigtable>=2.32.0", # For Bigtable database
+  "google-cloud-discoveryengine>=0.13.12, <0.14.0", # For Discovery Engine Search Tool
+  "google-cloud-secret-manager>=2.22.0, <3.0.0", # Fetching secrets in RestAPI Tool
+  "google-cloud-spanner>=3.56.0, <4.0.0", # For Spanner database
+  "google-cloud-speech>=2.30.0, <3.0.0", # For Audio Transcription
+  "google-cloud-storage>=2.18.0, <3.0.0", # For GCS Artifact service
+  "google-genai>=1.41.0, <2.0.0", # Google GenAI SDK
+  "graphviz>=0.20.2, <1.0.0", # Graphviz for graph rendering
+  "mcp>=1.8.0, <2.0.0;python_version>='3.10'", # For MCP Toolset
+  "opentelemetry-api>=1.37.0, <=1.37.0", # OpenTelemetry - limit upper version for sdk and api to not risk breaking changes from unstable _logs package.
   "opentelemetry-exporter-gcp-logging>=1.9.0a0, <2.0.0",
   "opentelemetry-exporter-gcp-monitoring>=1.9.0a0, <2.0.0",
   "opentelemetry-exporter-gcp-trace>=1.9.0, <2.0.0",
   "opentelemetry-exporter-otlp-proto-http>=1.36.0",
   "opentelemetry-resourcedetector-gcp>=1.9.0a0, <2.0.0",
   "opentelemetry-sdk>=1.37.0, <=1.37.0",
-  "pydantic>=2.0, <3.0.0", # For data validation/models
-  "python-dateutil>=2.9.0.post0, <3.0.0", # For Vertext AI Session Service
-  "python-dotenv>=1.0.0, <2.0.0", # To manage environment variables
+  "pydantic>=2.0, <3.0.0", # For data validation/models
+  "python-dateutil>=2.9.0.post0, <3.0.0", # For Vertext AI Session Service
+  "python-dotenv>=1.0.0, <2.0.0", # To manage environment variables
   "requests>=2.32.4, <3.0.0",
-  "sqlalchemy-spanner>=1.14.0", # Spanner database session service
-  "sqlalchemy>=2.0, <3.0.0", # SQL database ORM
-  "starlette>=0.46.2, <1.0.0", # For FastAPI CLI
-  "tenacity>=8.0.0, <9.0.0", # For Retry management
+  "sqlalchemy-spanner>=1.14.0", # Spanner database session service
+  "sqlalchemy>=2.0, <3.0.0", # SQL database ORM
+  "starlette>=0.46.2, <1.0.0", # For FastAPI CLI
+  "tenacity>=8.0.0, <9.0.0", # For Retry management
   "typing-extensions>=4.5, <5",
-  "tzlocal>=5.3, <6.0", # Time zone utilities
-  "uvicorn>=0.34.0, <1.0.0", # ASGI server for FastAPI
-  "watchdog>=6.0.0, <7.0.0", # For file change detection and hot reload
-  "websockets>=15.0.1, <16.0.0", # For BaseLlmFlow
+  "tzlocal>=5.3, <6.0", # Time zone utilities
+  "uvicorn>=0.34.0, <1.0.0", # ASGI server for FastAPI
+  "watchdog>=6.0.0, <7.0.0", # For file change detection and hot reload
+  "websockets>=15.0.1, <16.0.0", # For BaseLlmFlow
   # go/keep-sorted end
 ]
 dynamic = ["version"]
@@ -111,13 +111,13 @@ eval = [
 test = [
   # go/keep-sorted start
   "a2a-sdk>=0.3.0,<0.4.0;python_version>='3.10'",
-  "anthropic>=0.43.0", # For anthropic model tests
-  "kubernetes>=29.0.0", # For GkeCodeExecutor
+  "anthropic>=0.43.0", # For anthropic model tests
+  "kubernetes>=29.0.0", # For GkeCodeExecutor
   "langchain-community>=0.3.17",
-  "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
-  "litellm>=1.75.5, <2.0.0", # For LiteLLM tests
-  "llama-index-readers-file>=0.4.0", # For retrieval tests
-  "openai>=1.100.2", # For LiteLLM
+  "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
+  "litellm>=1.75.5, <2.0.0", # For LiteLLM tests
+  "llama-index-readers-file>=0.4.0", # For retrieval tests
+  "openai>=1.100.2", # For LiteLLM
   "pytest>=8.3.4",
   "pytest-asyncio>=0.25.0",
   "pytest-mock>=3.14.0",
@@ -139,22 +139,20 @@ docs = [
 
 # Optional extensions
 extensions = [
-  "anthropic>=0.43.0", # For anthropic model support
-  "beautifulsoup4>=3.2.2", # For load_web_page tool.
-  "crewai[tools];python_version>='3.10'", # For CrewaiTool
-  "docker>=7.0.0", # For ContainerCodeExecutor
-  "kubernetes>=29.0.0", # For GkeCodeExecutor
-  "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
-  "litellm>=1.75.5", # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
-  "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
-  "llama-index-embeddings-google-genai>=0.3.0",# For files retrieval using LlamaIndex.
-  "lxml>=5.3.0", # For load_web_page tool.
-  "toolbox-core>=0.1.0", # For tools.toolbox_toolset.ToolboxToolset
+  "anthropic>=0.43.0", # For anthropic model support
+  "beautifulsoup4>=3.2.2", # For load_web_page tool.
+  "crewai[tools];python_version>='3.10'", # For CrewaiTool
+  "docker>=7.0.0", # For ContainerCodeExecutor
+  "kubernetes>=29.0.0", # For GkeCodeExecutor
+  "langgraph>=0.2.60, <0.4.8", # For LangGraphAgent
+  "litellm>=1.75.5", # For LiteLlm class. Currently has OpenAI limitations. TODO: once LiteLlm fix it
+  "llama-index-readers-file>=0.4.0", # For retrieval using LlamaIndex.
+  "llama-index-embeddings-google-genai>=0.3.0", # For files retrieval using LlamaIndex.
+  "lxml>=5.3.0", # For load_web_page tool.
+  "toolbox-core>=0.1.0", # For tools.toolbox_toolset.ToolboxToolset
 ]
 
-otel-gcp = [
-    "opentelemetry-instrumentation-google-genai>=0.3b0, <1.0.0",
-]
+otel-gcp = ["opentelemetry-instrumentation-google-genai>=0.3b0, <1.0.0"]
 
 
 [tool.pyink]
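
The tightened FastAPI bound and the environment markers above use standard PEP 508 specifier syntax. A small illustration with the third-party packaging library (not part of this commit; the versions checked below are examples) shows how such constraints evaluate:

# Hypothetical illustration only; requires `pip install packaging`.
from packaging.markers import Marker
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# The new FastAPI bound accepts 0.115.x through 1.118.x but rejects 1.119.0.
fastapi_spec = SpecifierSet(">=0.115.0, <1.119.0")
print(Version("1.118.0") in fastapi_spec)  # True
print(Version("1.119.0") in fastapi_spec)  # False

# Markers such as ";python_version>='3.10'" gate a dependency on the interpreter.
mcp_marker = Marker("python_version >= '3.10'")
print(mcp_marker.evaluate())  # True when run on Python 3.10 or newer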

src/google/adk/agents/base_agent.py

Lines changed: 2 additions & 2 deletions
@@ -158,8 +158,8 @@ class MyAgent(BaseAgent):
 
     Returns:
       Optional[types.Content]: The content to return to the user.
-        When the content is present, the provided content will be used as agent
-        response and appended to event history as agent response.
+        When the content is present, an additional event with the provided content
+        will be appended to event history as an additional agent response.
     """
 
   def _load_agent_state(
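
The reworded docstring clarifies that a returned Content is appended as an extra agent-response event rather than replacing the original response. A minimal sketch of a callback that exercises this contract; the hook name and the callback_context parameter are illustrative assumptions, not taken from this diff:

# Illustrative only: `after_agent_callback` and `callback_context` are assumed names.
from typing import Optional

from google.genai import types


def after_agent_callback(callback_context) -> Optional[types.Content]:
  # Returning None leaves the event history unchanged.
  # Returning Content appends one more agent-response event to the history.
  return types.Content(
      role='model',
      parts=[types.Part(text='Post-run summary added as an extra event.')],
  )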

src/google/adk/flows/llm_flows/functions.py

Lines changed: 96 additions & 33 deletions
@@ -275,20 +275,38 @@ async def _execute_single_function_call_async(
     tool_confirmation: Optional[ToolConfirmation] = None,
 ) -> Optional[Event]:
   """Execute a single function call with thread safety for state modifications."""
-  tool, tool_context = _get_tool_and_context(
-      invocation_context,
-      function_call,
-      tools_dict,
-      tool_confirmation,
+  # Do not use "args" as the variable name, because it is a reserved keyword
+  # in python debugger.
+  # Make a deep copy to avoid being modified.
+  function_args = (
+      copy.deepcopy(function_call.args) if function_call.args else {}
   )
 
-  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
-    # Do not use "args" as the variable name, because it is a reserved keyword
-    # in python debugger.
-    # Make a deep copy to avoid being modified.
-    function_args = (
-        copy.deepcopy(function_call.args) if function_call.args else {}
+  tool_context = _create_tool_context(
+      invocation_context, function_call, tool_confirmation
+  )
+
+  try:
+    tool = _get_tool(function_call, tools_dict)
+  except ValueError as tool_error:
+    tool = BaseTool(name=function_call.name, description='Tool not found')
+    error_response = (
+        await invocation_context.plugin_manager.run_on_tool_error_callback(
+            tool=tool,
+            tool_args=function_args,
+            tool_context=tool_context,
+            error=tool_error,
+        )
     )
+    if error_response is not None:
+      return __build_response_event(
+          tool, error_response, tool_context, invocation_context
+      )
+    else:
+      raise tool_error
+
+  async def _run_with_trace():
+    nonlocal function_args
 
     # Step 1: Check if plugin before_tool_callback overrides the function
     # response.
@@ -375,13 +393,23 @@ async def _execute_single_function_call_async(
     function_response_event = __build_response_event(
         tool, function_response, tool_context, invocation_context
     )
-    trace_tool_call(
-        tool=tool,
-        args=function_args,
-        function_response_event=function_response_event,
-    )
     return function_response_event
 
+  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
+    try:
+      function_response_event = await _run_with_trace()
+      trace_tool_call(
+          tool=tool,
+          args=function_args,
+          function_response_event=function_response_event,
+      )
+      return function_response_event
+    except:
+      trace_tool_call(
+          tool=tool, args=function_args, function_response_event=None
+      )
+      raise
+
 
 async def handle_function_calls_live(
     invocation_context: InvocationContext,
@@ -451,13 +479,17 @@ async def _execute_single_function_call_live(
   tool, tool_context = _get_tool_and_context(
       invocation_context, function_call, tools_dict
   )
-  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
+
+  function_args = (
+      copy.deepcopy(function_call.args) if function_call.args else {}
+  )
+
+  async def _run_with_trace():
+    nonlocal function_args
+
     # Do not use "args" as the variable name, because it is a reserved keyword
     # in python debugger.
     # Make a deep copy to avoid being modified.
-    function_args = (
-        copy.deepcopy(function_call.args) if function_call.args else {}
-    )
     function_response = None
 
     # Handle before_tool_callbacks - iterate through the canonical callback
@@ -511,13 +543,23 @@ async def _execute_single_function_call_live(
     function_response_event = __build_response_event(
         tool, function_response, tool_context, invocation_context
     )
-    trace_tool_call(
-        tool=tool,
-        args=function_args,
-        function_response_event=function_response_event,
-    )
     return function_response_event
 
+  with tracer.start_as_current_span(f'execute_tool {tool.name}'):
+    try:
+      function_response_event = await _run_with_trace()
+      trace_tool_call(
+          tool=tool,
+          args=function_args,
+          function_response_event=function_response_event,
+      )
+      return function_response_event
+    except:
+      trace_tool_call(
+          tool=tool, args=function_args, function_response_event=None
+      )
+      raise
+
 
 async def _process_function_live_helper(
     tool,
@@ -639,24 +681,45 @@ async def run_tool_and_update_queue(tool, function_args, tool_context):
     return function_response
 
 
-def _get_tool_and_context(
-    invocation_context: InvocationContext,
-    function_call: types.FunctionCall,
-    tools_dict: dict[str, BaseTool],
-    tool_confirmation: Optional[ToolConfirmation] = None,
+def _get_tool(
+    function_call: types.FunctionCall, tools_dict: dict[str, BaseTool]
 ):
+  """Returns the tool corresponding to the function call."""
   if function_call.name not in tools_dict:
     raise ValueError(
-        f'Function {function_call.name} is not found in the tools_dict.'
+        f'Function {function_call.name} is not found in the tools_dict:'
+        f' {tools_dict.keys()}.'
     )
 
-  tool_context = ToolContext(
+  return tools_dict[function_call.name]
+
+
+def _create_tool_context(
+    invocation_context: InvocationContext,
+    function_call: types.FunctionCall,
+    tool_confirmation: Optional[ToolConfirmation] = None,
+):
+  """Creates a ToolContext object."""
+  return ToolContext(
       invocation_context=invocation_context,
      function_call_id=function_call.id,
      tool_confirmation=tool_confirmation,
   )
 
-  tool = tools_dict[function_call.name]
+
+def _get_tool_and_context(
+    invocation_context: InvocationContext,
+    function_call: types.FunctionCall,
+    tools_dict: dict[str, BaseTool],
+    tool_confirmation: Optional[ToolConfirmation] = None,
+):
+  """Returns the tool and tool context corresponding to the function call."""
+  tool = _get_tool(function_call, tools_dict)
+  tool_context = _create_tool_context(
+      invocation_context,
+      function_call,
+      tool_confirmation,
+  )
 
   return (tool, tool_context)
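
Two patterns stand out in this refactor: the tool is resolved before the tracing span is opened, with lookup failures routed through the plugin error callback, and trace_tool_call now runs on both the success and the failure path, receiving None for the response event when the tool body raises. The following is a standalone sketch of that control flow under assumed names (record_tool_trace, execute_tool, on_tool_error), not the ADK implementation itself:

import asyncio
from typing import Any, Callable, Optional


def record_tool_trace(name: str, args: dict, response: Optional[dict]) -> None:
  # Stand-in for trace_tool_call: a None response marks a failed call.
  print(f'trace: tool={name} args={args} response={response}')


async def execute_tool(
    name: str,
    args: dict,
    tools: dict[str, Callable[..., Any]],
    on_tool_error: Callable[[str, Exception], Optional[dict]],
) -> dict:
  # Resolve the tool first; a lookup failure goes to the error callback,
  # which may convert it into a normal response (analogous to
  # run_on_tool_error_callback) or let the error propagate.
  try:
    tool = tools[name]
  except KeyError as err:
    handled = on_tool_error(name, err)
    if handled is not None:
      return handled
    raise

  async def _run() -> dict:
    return {'result': await asyncio.to_thread(tool, **args)}

  # Trace on both paths: pass the response on success, None on failure.
  try:
    response = await _run()
    record_tool_trace(name, args, response)
    return response
  except Exception:
    record_tool_trace(name, args, None)
    raise


if __name__ == '__main__':
  registry = {'add': lambda a, b: a + b}
  result = asyncio.run(
      execute_tool('add', {'a': 1, 'b': 2}, registry, lambda n, e: {'error': str(e)})
  )
  print(result)  # {'result': 3}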

src/google/adk/telemetry/tracing.py

Lines changed: 6 additions & 3 deletions
@@ -26,6 +26,7 @@
 import json
 import os
 from typing import Any
+from typing import Optional
 from typing import TYPE_CHECKING
 
 from google.genai import types
@@ -118,7 +119,7 @@ def trace_agent_invocation(
 def trace_tool_call(
     tool: BaseTool,
     args: dict[str, Any],
-    function_response_event: Event,
+    function_response_event: Optional[Event],
 ):
   """Traces tool call.
 
@@ -154,7 +155,8 @@ def trace_tool_call(
   tool_call_id = '<not specified>'
   tool_response = '<not specified>'
   if (
-      function_response_event.content is not None
+      function_response_event is not None
+      and function_response_event.content is not None
       and function_response_event.content.parts
   ):
     response_parts = function_response_event.content.parts
@@ -169,7 +171,8 @@
 
   if not isinstance(tool_response, dict):
     tool_response = {'result': tool_response}
-  span.set_attribute('gcp.vertex.agent.event_id', function_response_event.id)
+  if function_response_event is not None:
+    span.set_attribute('gcp.vertex.agent.event_id', function_response_event.id)
   if _should_add_request_response_to_spans():
     span.set_attribute(
         'gcp.vertex.agent.tool_response',
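
With function_response_event now Optional, attributes derived from the event are only recorded when an event actually exists. A small self-contained sketch of that guard, using a stand-in event type rather than ADK's Event class:

from dataclasses import dataclass
from typing import Optional


@dataclass
class FakeEvent:
  # Stand-in for the ADK Event; only the fields touched by the guard are modeled.
  id: str
  content: Optional[str] = None


def collect_span_attributes(event: Optional[FakeEvent]) -> dict:
  attrs: dict = {'gcp.vertex.agent.tool_response': '<not specified>'}
  if event is not None and event.content is not None:
    attrs['gcp.vertex.agent.tool_response'] = event.content
  if event is not None:
    attrs['gcp.vertex.agent.event_id'] = event.id
  return attrs


print(collect_span_attributes(None))                              # failure path: no event_id set
print(collect_span_attributes(FakeEvent(id='e1', content='ok')))  # success path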
