Skip to content

Commit 56178ea

Browse files
committed
fix: plumb thinking blocks between litellm and gen ai sdk parts
1 parent b0017ae commit 56178ea

File tree

1 file changed: +55 additions, −12 deletions

src/google/adk/models/lite_llm.py

Lines changed: 55 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -247,13 +247,15 @@ def _content_to_message_param(
247247

248248
# Handle user or assistant messages
249249
role = _to_litellm_role(content.role)
250-
message_content = _get_content(content.parts) or None
251250

252251
if role == "user":
252+
message_content = _get_content(content.parts) or None
253253
return ChatCompletionUserMessage(role="user", content=message_content)
254254
else: # assistant/model
255255
tool_calls = []
256-
content_present = False
256+
thinking_blocks = []
257+
other_parts = []
258+
257259
for part in content.parts:
258260
if part.function_call:
259261
tool_calls.append(
@@ -266,23 +268,40 @@ def _content_to_message_param(
266268
),
267269
)
268270
)
269-
elif part.text or part.inline_data:
270-
content_present = True
271+
elif part.thought:
272+
if (
273+
part.thought_signature
274+
and part.thought_signature.decode("utf-8") == "redacted_thinking"
275+
):
276+
thinking_block = {
277+
"type": "redacted_thinking",
278+
"data": part.text,
279+
}
280+
else:
281+
thinking_block = {"type": "thinking"}
282+
if part.thought_signature:
283+
thinking_block["signature"] = part.thought_signature.decode("utf-8")
284+
if part.text:
285+
thinking_block["thinking"] = part.text
286+
thinking_blocks.append(thinking_block)
287+
else:
288+
other_parts.append(part)
271289

272-
final_content = message_content if content_present else None
273-
if final_content and isinstance(final_content, list):
290+
message_content = _get_content(other_parts) or None
291+
if message_content and isinstance(message_content, list):
274292
# when the content is a single text object, we can use it directly.
275293
# this is needed for ollama_chat provider which fails if content is a list
276-
final_content = (
277-
final_content[0].get("text", "")
278-
if final_content[0].get("type", None) == "text"
279-
else final_content
294+
message_content = (
295+
message_content[0].get("text", "")
296+
if message_content[0].get("type", None) == "text"
297+
else message_content
280298
)
281299

282300
return ChatCompletionAssistantMessage(
283301
role=role,
284-
content=final_content,
302+
content=message_content,
285303
tool_calls=tool_calls or None,
304+
thinking_blocks=thinking_blocks or None,
286305
)
287306

288307

@@ -602,6 +621,31 @@ def _message_to_generate_content_response(
602621
if message.get("content", None):
603622
parts.append(types.Part.from_text(text=message.get("content")))
604623

624+
if message.get("thinking_blocks"):
625+
for block in message.get("thinking_blocks"):
626+
if block.get("type") == "thinking":
627+
signature = block.get("signature")
628+
thought = block.get("thinking")
629+
part = types.Part(
630+
thought=True,
631+
thought_signature=signature.encode("utf-8") if signature else None,
632+
text=thought,
633+
)
634+
parts.append(part)
635+
elif block.get("type") == "redacted_thinking":
636+
# Part doesn't have redacted thinking type
637+
# therefore use signature field to show redacted thinking
638+
signature="redacted_thinking"
639+
thought = block.get("data")
640+
part = types.Part(
641+
thought=True,
642+
thought_signature=signature.encode("utf-8") if signature else None,
643+
text=thought,
644+
)
645+
parts.append(part)
646+
else:
647+
logging.warning(f'ignoring unsupported thinking block type {type(block)}')
648+
605649
if message.get("tool_calls", None):
606650
for tool_call in message.get("tool_calls"):
607651
if tool_call.type == "function":
@@ -611,7 +655,6 @@ def _message_to_generate_content_response(
611655
)
612656
part.function_call.id = tool_call.id
613657
parts.append(part)
614-
615658
return LlmResponse(
616659
content=types.Content(role="model", parts=parts), partial=is_partial
617660
)

0 commit comments

Comments (0)