
Commit b0b82bd

rbren, openhands-agent, and enyst authored
Simplify hello world example (#1047)
Co-authored-by: openhands <openhands@all-hands.dev>
Co-authored-by: enyst <engel.nyst@gmail.com>
1 parent 2bb7c89 commit b0b82bd

File tree: 9 files changed (+108, -50 lines)


.github/workflows/run-examples.yml

Lines changed: 1 addition & 1 deletion
@@ -52,6 +52,7 @@ jobs:
         run: |
           # List of examples to test
           # Excluded examples:
+          # - 01_hello_world.py: requires LiteLLM proxy URL (OPENAI_BASE_URL) not set in CI
           # - 04_confirmation_mode_example.py: requires user input
           # - 06_interactive_terminal_w_reasoning.py: interactive terminal
           # - 08_mcp_with_oauth.py: requires OAuth setup
@@ -61,7 +62,6 @@ jobs:
           # - 04_vscode_with_docker_sandboxed_server.py: requires VSCode setup
           set -e
           EXAMPLES=(
-            "examples/01_standalone_sdk/01_hello_world.py"
             "examples/01_standalone_sdk/02_custom_tools.py"
             "examples/01_standalone_sdk/03_activate_skill.py"
             "examples/01_standalone_sdk/05_use_llm_registry.py"
examples/01_standalone_sdk/01_hello_world.py

Lines changed: 16 additions & 21 deletions
@@ -1,33 +1,28 @@
 import os
 
-from pydantic import SecretStr
+from openhands.sdk import LLM, Agent, Conversation, Tool
+from openhands.tools.execute_bash import BashTool
+from openhands.tools.file_editor import FileEditorTool
+from openhands.tools.task_tracker import TaskTrackerTool
 
-from openhands.sdk import LLM, Conversation
-from openhands.tools.preset.default import get_default_agent
 
-
-# Configure LLM and agent
-# You can get an API key from https://app.all-hands.dev/settings/api-keys
-api_key = os.getenv("LLM_API_KEY")
-assert api_key is not None, "LLM_API_KEY environment variable is not set."
-model = os.getenv("LLM_MODEL", "openhands/claude-sonnet-4-5-20250929")
-base_url = os.getenv("LLM_BASE_URL")
 llm = LLM(
-    model=model,
-    api_key=SecretStr(api_key),
-    base_url=base_url,
-    usage_id="agent",
+    model="anthropic/claude-sonnet-4-5-20250929",
+    api_key=os.getenv("LLM_API_KEY"),
+)
+
+agent = Agent(
+    llm=llm,
+    tools=[
+        Tool(name=BashTool.name),
+        Tool(name=FileEditorTool.name),
+        Tool(name=TaskTrackerTool.name),
+    ],
 )
-agent = get_default_agent(llm=llm, cli_mode=True)
 
-# Start a conversation and send some messages
 cwd = os.getcwd()
 conversation = Conversation(agent=agent, workspace=cwd)
 
-# Send a message and let the agent run
 conversation.send_message("Write 3 facts about the current project into FACTS.txt.")
 conversation.run()
-
-# Report cost
-cost = llm.metrics.accumulated_cost
-print(f"EXAMPLE_COST: {cost}")
+print("All done!")

openhands-sdk/openhands/sdk/llm/llm.py

Lines changed: 25 additions & 9 deletions
@@ -123,14 +123,14 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     # Config fields
     # =========================================================================
     model: str = Field(default="claude-sonnet-4-20250514", description="Model name.")
-    api_key: SecretStr | None = Field(default=None, description="API key.")
+    api_key: str | SecretStr | None = Field(default=None, description="API key.")
     base_url: str | None = Field(default=None, description="Custom base URL.")
     api_version: str | None = Field(
         default=None, description="API version (e.g., Azure)."
     )
 
-    aws_access_key_id: SecretStr | None = Field(default=None)
-    aws_secret_access_key: SecretStr | None = Field(default=None)
+    aws_access_key_id: str | SecretStr | None = Field(default=None)
+    aws_secret_access_key: str | SecretStr | None = Field(default=None)
     aws_region_name: str | None = Field(default=None)
 
     openrouter_site_url: str = Field(default="https://docs.all-hands.dev/")
@@ -296,7 +296,7 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     # =========================================================================
     @field_validator("api_key", "aws_access_key_id", "aws_secret_access_key")
     @classmethod
-    def _validate_secrets(cls, v: SecretStr | None, info):
+    def _validate_secrets(cls, v: str | SecretStr | None, info) -> SecretStr | None:
         return validate_secret(v, info)
 
     @model_validator(mode="before")
@@ -342,8 +342,10 @@ def _set_env_side_effects(self):
         if self.openrouter_app_name:
             os.environ["OR_APP_NAME"] = self.openrouter_app_name
         if self.aws_access_key_id:
+            assert isinstance(self.aws_access_key_id, SecretStr)
             os.environ["AWS_ACCESS_KEY_ID"] = self.aws_access_key_id.get_secret_value()
         if self.aws_secret_access_key:
+            assert isinstance(self.aws_secret_access_key, SecretStr)
             os.environ["AWS_SECRET_ACCESS_KEY"] = (
                 self.aws_secret_access_key.get_secret_value()
             )
@@ -633,14 +635,18 @@ def _one_attempt(**retry_kwargs) -> ResponsesAPIResponse:
         typed_input: ResponseInputParam | str = (
             cast(ResponseInputParam, input_items) if input_items else ""
         )
+        # Extract api_key value with type assertion for type checker
+        api_key_value: str | None = None
+        if self.api_key:
+            assert isinstance(self.api_key, SecretStr)
+            api_key_value = self.api_key.get_secret_value()
+
         ret = litellm_responses(
             model=self.model,
             input=typed_input,
             instructions=instructions,
             tools=resp_tools,
-            api_key=self.api_key.get_secret_value()
-            if self.api_key
-            else None,
+            api_key=api_key_value,
             api_base=self.base_url,
             api_version=self.api_version,
             timeout=self.timeout,
@@ -708,10 +714,16 @@ def _transport_call(
                 "ignore",
                 category=UserWarning,
             )
+            # Extract api_key value with type assertion for type checker
+            api_key_value: str | None = None
+            if self.api_key:
+                assert isinstance(self.api_key, SecretStr)
+                api_key_value = self.api_key.get_secret_value()
+
             # Some providers need renames handled in _normalize_call_kwargs.
             ret = litellm_completion(
                 model=self.model,
-                api_key=self.api_key.get_secret_value() if self.api_key else None,
+                api_key=api_key_value,
                 api_base=self.base_url,
                 api_version=self.api_version,
                 timeout=self.timeout,
@@ -755,7 +767,11 @@ def _init_model_info_and_caps(self) -> None:
             base_url = "http://" + base_url
         try:
             headers = {}
-            api_key = self.api_key.get_secret_value() if self.api_key else ""
+            # Extract api_key value with type assertion for type checker
+            api_key = ""
+            if self.api_key:
+                assert isinstance(self.api_key, SecretStr)
+                api_key = self.api_key.get_secret_value()
             if api_key:
                 headers["Authorization"] = f"Bearer {api_key}"
 
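The practical effect of widening api_key (and the AWS credential fields) to str | SecretStr is that callers may pass a plain string and the field validator normalizes it to SecretStr, so downstream code can keep calling get_secret_value(). A minimal sketch of the intended behaviour, mirroring the new tests further down (the model name and usage_id are arbitrary example values):

from pydantic import SecretStr

from openhands.sdk import LLM

# A plain string is accepted at construction time...
llm = LLM(model="gpt-4", api_key="plain-text-key", usage_id="demo")

# ...but it is stored as SecretStr, so the value stays masked in repr()/str().
assert isinstance(llm.api_key, SecretStr)
assert llm.api_key.get_secret_value() == "plain-text-key"
assert "plain-text-key" not in str(llm)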

openhands-sdk/openhands/sdk/utils/pydantic_secrets.py

Lines changed: 8 additions & 2 deletions
@@ -28,11 +28,13 @@ def serialize_secret(v: SecretStr | None, info):
     return v
 
 
-def validate_secret(v: SecretStr | None, info):
+def validate_secret(v: str | SecretStr | None, info) -> SecretStr | None:
     """
     Deserialize secret fields, handling encryption and empty values.
 
+    Accepts both str and SecretStr inputs, always returns SecretStr | None.
     - Empty secrets are converted to None
+    - Plain strings are converted to SecretStr
     - If a cipher is provided in context, attempts to decrypt the value
     - If decryption fails, the cipher returns None and a warning is logged
     - This gracefully handles conversations encrypted with different keys or were redacted
@@ -55,4 +57,8 @@ def validate_secret(v: SecretStr | None, info):
         cipher: Cipher = info.context.get("cipher")
         return cipher.decrypt(secret_value)
 
-    return v
+    # Always return SecretStr
+    if isinstance(v, SecretStr):
+        return v
+    else:
+        return SecretStr(secret_value)
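For readers outside the SDK, the coercion that validate_secret now performs follows a common Pydantic pattern. A self-contained sketch of that pattern (illustrative only, with a made-up Example model; the real helper additionally handles cipher-based decryption and redaction via info.context):

from pydantic import BaseModel, SecretStr, field_validator


class Example(BaseModel):
    # Stand-in field; any secret-bearing field works the same way.
    token: str | SecretStr | None = None

    @field_validator("token")
    @classmethod
    def _coerce(cls, v: str | SecretStr | None) -> SecretStr | None:
        # Empty or whitespace-only strings become None; plain strings become SecretStr.
        if v is None or (isinstance(v, str) and not v.strip()):
            return None
        return v if isinstance(v, SecretStr) else SecretStr(v)


example = Example(token="abc")
assert isinstance(example.token, SecretStr)
assert example.token.get_secret_value() == "abc"
assert Example(token="   ").token is None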

tests/cross/test_agent_reconciliation.py

Lines changed: 4 additions & 0 deletions
@@ -50,9 +50,11 @@ def test_conversation_restart_with_nested_llms(tmp_path):
     # Verify the conversation was created successfully
     assert conversation1.id == conversation_id
     assert conversation1.agent.llm.api_key is not None
+    assert isinstance(conversation1.agent.llm.api_key, SecretStr)
     assert conversation1.agent.llm.api_key.get_secret_value() == "llm-api-key"
     assert isinstance(conversation1.agent.condenser, LLMSummarizingCondenser)
     assert conversation1.agent.condenser.llm.api_key is not None
+    assert isinstance(conversation1.agent.condenser.llm.api_key, SecretStr)
     assert conversation1.agent.condenser.llm.api_key.get_secret_value() == "llm-api-key"
 
     # Attempt to restart the conversation - this should work without errors
@@ -65,9 +67,11 @@ def test_conversation_restart_with_nested_llms(tmp_path):
     # Make sure the conversation gets initialized properly with no errors
     assert conversation2.id == conversation_id
     assert conversation2.agent.llm.api_key is not None
+    assert isinstance(conversation2.agent.llm.api_key, SecretStr)
     assert conversation2.agent.llm.api_key.get_secret_value() == "llm-api-key"
     assert isinstance(conversation2.agent.condenser, LLMSummarizingCondenser)
     assert conversation2.agent.condenser.llm.api_key is not None
+    assert isinstance(conversation2.agent.condenser.llm.api_key, SecretStr)
     assert conversation2.agent.condenser.llm.api_key.get_secret_value() == "llm-api-key"
 
     # Verify that the agent configuration is properly reconciled

tests/sdk/config/test_llm_config.py

Lines changed: 12 additions & 14 deletions
@@ -84,9 +84,9 @@ def test_llm_config_custom_values():
     )
 
     assert config.model == "gpt-4"
-    assert (
-        config.api_key is not None and config.api_key.get_secret_value() == "test-key"
-    )
+    assert config.api_key is not None
+    assert isinstance(config.api_key, SecretStr)
+    assert config.api_key.get_secret_value() == "test-key"
     assert config.base_url == "https://api.example.com"
     assert config.api_version == "v1"
     assert config.num_retries == 3
@@ -122,9 +122,9 @@ def test_llm_config_custom_values():
 def test_llm_config_secret_str():
     """Test that api_key is properly handled as SecretStr."""
     config = LLM(model="gpt-4", api_key=SecretStr("secret-key"), usage_id="test-llm")
-    assert (
-        config.api_key is not None and config.api_key.get_secret_value() == "secret-key"
-    )
+    assert config.api_key is not None
+    assert isinstance(config.api_key, SecretStr)
+    assert config.api_key.get_secret_value() == "secret-key"
     # Ensure the secret is not exposed in string representation
     assert "secret-key" not in str(config)
 
@@ -138,14 +138,12 @@ def test_llm_config_aws_credentials():
         aws_secret_access_key=SecretStr("test-secret-key"),
         aws_region_name="us-east-1",
     )
-    assert (
-        config.aws_access_key_id is not None
-        and config.aws_access_key_id.get_secret_value() == "test-access-key"
-    )
-    assert (
-        config.aws_secret_access_key is not None
-        and config.aws_secret_access_key.get_secret_value() == "test-secret-key"
-    )
+    assert config.aws_access_key_id is not None
+    assert isinstance(config.aws_access_key_id, SecretStr)
+    assert config.aws_access_key_id.get_secret_value() == "test-access-key"
+    assert config.aws_secret_access_key is not None
+    assert isinstance(config.aws_secret_access_key, SecretStr)
+    assert config.aws_secret_access_key.get_secret_value() == "test-secret-key"
     assert config.aws_region_name == "us-east-1"
 
 
tests/sdk/conversation/local/test_state_serialization.py

Lines changed: 1 addition & 0 deletions
@@ -540,6 +540,7 @@ def test_conversation_with_agent_different_llm_config():
     )
 
     assert new_conversation._state.agent.llm.api_key is not None
+    assert isinstance(new_conversation._state.agent.llm.api_key, SecretStr)
     assert new_conversation._state.agent.llm.api_key.get_secret_value() == "new-key"
     # Test that the core state structure is preserved (excluding agent differences)
     new_dump = new_conversation._state.model_dump(mode="json", exclude={"agent"})

tests/sdk/llm/test_api_key_validation.py

Lines changed: 35 additions & 3 deletions
@@ -27,6 +27,7 @@ def test_valid_api_key_preserved():
     """Test that valid API keys are preserved."""
     llm = LLM(model="gpt-4", api_key=SecretStr("valid-key"), usage_id="test-llm")
     assert llm.api_key is not None
+    assert isinstance(llm.api_key, SecretStr)
     assert llm.api_key.get_secret_value() == "valid-key"
 
 
@@ -43,9 +44,9 @@ def test_none_api_key_preserved():
 def test_empty_string_direct_input():
     """Test that empty string passed directly (not as SecretStr) is converted to None."""  # noqa: E501
     # This tests the case where someone might pass a string directly
-    # Note: This would normally cause a validation error, but we handle it in field validator  # noqa: E501
+    # The field validator now accepts str and converts it to SecretStr
     data = {"model": "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", "api_key": ""}
-    llm = LLM(**data, usage_id="test-llm")  # type: ignore[arg-type]
+    llm = LLM(**data, usage_id="test-llm")  # pyright: ignore[reportArgumentType]
     assert llm.api_key is None
 
 
@@ -55,7 +56,7 @@ def test_whitespace_string_direct_input():
         "model": "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
         "api_key": " \t\n ",
     }
-    llm = LLM(**data, usage_id="test-llm")  # type: ignore[arg-type]
+    llm = LLM(**data, usage_id="test-llm")  # pyright: ignore[reportArgumentType]
     assert llm.api_key is None
 
 
@@ -75,6 +76,7 @@ def test_non_bedrock_model_with_valid_key():
     """Test that non-Bedrock models work normally with valid API keys."""
     llm = LLM(model="gpt-4", api_key=SecretStr("valid-openai-key"), usage_id="test-llm")
     assert llm.api_key is not None
+    assert isinstance(llm.api_key, SecretStr)
     assert llm.api_key.get_secret_value() == "valid-openai-key"
 
 
@@ -90,7 +92,37 @@ def test_aws_credentials_handling():
     )
     assert llm.api_key is None
     assert llm.aws_access_key_id is not None
+    assert isinstance(llm.aws_access_key_id, SecretStr)
     assert llm.aws_access_key_id.get_secret_value() == "test-access-key"
     assert llm.aws_secret_access_key is not None
+    assert isinstance(llm.aws_secret_access_key, SecretStr)
     assert llm.aws_secret_access_key.get_secret_value() == "test-secret-key"
     assert llm.aws_region_name == "us-west-2"
+
+
+def test_plain_string_api_key():
+    """Test that plain string API keys are converted to SecretStr."""
+    llm = LLM(model="gpt-4", api_key="my-plain-string-key", usage_id="test-llm")
+    assert llm.api_key is not None
+    assert isinstance(llm.api_key, SecretStr)
+    assert llm.api_key.get_secret_value() == "my-plain-string-key"
+
+
+def test_plain_string_aws_credentials():
+    """Test that plain string AWS credentials are converted to SecretStr."""
+    llm = LLM(
+        usage_id="test-llm",
+        model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
+        api_key=None,
+        aws_access_key_id="plain-access-key",
+        aws_secret_access_key="plain-secret-key",
+        aws_region_name="us-west-2",
+    )
+    assert llm.api_key is None
+    assert llm.aws_access_key_id is not None
+    assert isinstance(llm.aws_access_key_id, SecretStr)
+    assert llm.aws_access_key_id.get_secret_value() == "plain-access-key"
+    assert llm.aws_secret_access_key is not None
+    assert isinstance(llm.aws_secret_access_key, SecretStr)
+    assert llm.aws_secret_access_key.get_secret_value() == "plain-secret-key"
+    assert llm.aws_region_name == "us-west-2"

tests/sdk/llm/test_llm_json_storage.py

Lines changed: 6 additions & 0 deletions
@@ -51,6 +51,12 @@ def test_llm_store_and_load_json():
     assert original_llm.api_key is not None
     assert original_llm.aws_access_key_id is not None
     assert original_llm.aws_secret_access_key is not None
+    assert isinstance(loaded_llm.api_key, SecretStr)
+    assert isinstance(original_llm.api_key, SecretStr)
+    assert isinstance(loaded_llm.aws_access_key_id, SecretStr)
+    assert isinstance(original_llm.aws_access_key_id, SecretStr)
+    assert isinstance(loaded_llm.aws_secret_access_key, SecretStr)
+    assert isinstance(original_llm.aws_secret_access_key, SecretStr)
     assert (
         loaded_llm.api_key.get_secret_value()
         == original_llm.api_key.get_secret_value()
