Skip to content

Commit 1e6a9da

Browse files
GWeale authored and copybara-github committed
fix: Change instruction insertion to respect tool call/response pairs
Make sure `_add_instructions_to_user_content` skips over user messages that carry function_response parts, so that tool_use/tool_result blocks stay together. Closes #3229. Co-authored-by: George Weale <gweale@google.com> PiperOrigin-RevId: 826076141
1 parent d3796f9 commit 1e6a9da

File tree

2 files changed

+50
-4
lines changed

2 files changed

+50
-4
lines changed

src/google/adk/flows/llm_flows/contents.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -668,6 +668,16 @@ def _is_live_model_audio_event(event: Event) -> bool:
668668
return False
669669

670670

671+
def _content_contains_function_response(content: types.Content) -> bool:
  """Returns True when any part of ``content`` is a function response."""
  parts = content.parts
  if not parts:
    return False
  # A single function_response part is enough to classify the content.
  return any(part.function_response for part in parts)
679+
680+
671681
async def _add_instructions_to_user_content(
672682
invocation_context: InvocationContext,
673683
llm_request: LlmRequest,
@@ -695,13 +705,14 @@ async def _add_instructions_to_user_content(
695705

696706
if llm_request.contents:
697707
for i in range(len(llm_request.contents) - 1, -1, -1):
698-
if llm_request.contents[i].role != 'user':
708+
content = llm_request.contents[i]
709+
if content.role != 'user':
699710
insert_index = i + 1
700711
break
701-
elif i == 0:
702-
# All content from start is user content
703-
insert_index = 0
712+
if _content_contains_function_response(content):
713+
insert_index = i + 1
704714
break
715+
insert_index = i
705716
else:
706717
# No contents remaining, just append at the end
707718
insert_index = 0

tests/unittests/flows/llm_flows/test_instructions.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -923,6 +923,41 @@ async def test_no_dynamic_instructions_when_no_static(llm_backend):
923923
assert llm_request.contents[0].parts[0].text == "Hello world"
924924

925925

926+
@pytest.mark.asyncio
async def test_instructions_insert_after_function_response():
  """Ensure instruction insertion does not split tool_use/tool_result pairs."""
  agent = LlmAgent(name="test_agent")
  context = await _create_invocation_context(agent)

  call_part = types.Part.from_function_call(
      name="echo_tool", args={"echo": "value"}
  )
  response_part = types.Part.from_function_response(
      name="echo_tool", response={"result": "value"}
  )

  request = LlmRequest(
      contents=[
          types.Content(role="assistant", parts=[call_part]),
          types.Content(role="user", parts=[response_part]),
      ]
  )
  instructions = [
      types.Content(
          role="user", parts=[types.Part.from_text(text="Dynamic instruction")]
      )
  ]

  await _add_instructions_to_user_content(context, request, instructions)

  # The tool call and its response must stay adjacent; the dynamic
  # instruction is appended after the pair rather than inserted between.
  assert len(request.contents) == 3
  assert request.contents[0].parts[0].function_call
  assert request.contents[1].parts[0].function_response
  assert request.contents[2].parts[0].text == "Dynamic instruction"
959+
960+
926961
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
927962
@pytest.mark.asyncio
928963
async def test_static_instruction_with_files_and_text(llm_backend):

0 commit comments

Comments (0)