
Commit 67364a2

Reduce line-length from 500 to 88 and fix pre-commit errors (#68)
Co-authored-by: openhands <openhands@all-hands.dev>
1 parent: 288e440 · commit: 67364a2


58 files changed: +1937 −596 lines (5 of the 58 changed files are shown below)
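The commit title implies the project's line-length limit dropped from 500 to 88 characters (88 is the default used by formatters like black and ruff). The actual linter configuration change is not among the files shown here; purely as an illustration of the constraint the diffs below satisfy, a standalone check equivalent to the E501 long-line rule can be sketched in a few lines of Python (file path is an example, not the project's tooling):

MAX_LINE_LENGTH = 88  # the new limit; the previous limit was 500

def find_long_lines(path: str, limit: int = MAX_LINE_LENGTH) -> list[tuple[int, int]]:
    """Return (line_number, length) pairs for lines longer than `limit`."""
    offenders = []
    with open(path, encoding="utf-8") as f:
        for lineno, line in enumerate(f, start=1):
            length = len(line.rstrip("\n"))
            if length > limit:
                offenders.append((lineno, length))
    return offenders

for lineno, length in find_long_lines("examples/hello_world.py"):
    print(f"examples/hello_world.py:{lineno}: line too long ({length} > 88)")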

examples/hello_world.py

Lines changed: 8 additions & 1 deletion
@@ -61,7 +61,14 @@ def conversation_callback(event: EventType):
 conversation.send_message(
     message=Message(
         role="user",
-        content=[TextContent(text="Hello! Can you create a new Python file named hello.py that prints 'Hello, World!'?")],
+        content=[
+            TextContent(
+                text=(
+                    "Hello! Can you create a new Python file named hello.py that "
+                    "prints 'Hello, World!'?"
+                )
+            )
+        ],
     )
 )
 conversation.run()
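The rewrite above leans on Python's implicit concatenation of adjacent string literals: inside parentheses, consecutive literals are joined at compile time, so the wrapped form builds exactly the same message as the original one-liner. A standalone sketch (not from the repo) showing that the split point is irrelevant:

# Adjacent string literals separated only by whitespace are concatenated at
# compile time, so wrapping a long literal across lines does not change its value.
wrapped = (
    "Hello! Can you create a new Python file named hello.py that "
    "prints 'Hello, World!'?"
)
assert wrapped == (
    "Hello! Can you create a new Python file named hello.py"
    " that prints 'Hello, World!'?"
)
print(wrapped)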

openhands/core/agent/base.py

Lines changed: 2 additions & 1 deletion
@@ -73,7 +73,8 @@ def init_state(
         state: ConversationState,
         on_event: ConversationCallbackType,
     ) -> None:
-        """Initialize the empty conversation state to prepare the agent for user messages.
+        """Initialize the empty conversation state to prepare the agent for user
+        messages.

         Typically this involves adding system message

openhands/core/agent/codeact_agent/codeact_agent.py

Lines changed: 99 additions & 22 deletions
@@ -11,10 +11,23 @@

 from openhands.core.context import EnvContext, render_system_message
 from openhands.core.conversation import ConversationCallbackType, ConversationState
-from openhands.core.event import ActionEvent, AgentErrorEvent, LLMConvertibleEvent, MessageEvent, ObservationEvent, SystemPromptEvent
+from openhands.core.event import (
+    ActionEvent,
+    AgentErrorEvent,
+    LLMConvertibleEvent,
+    MessageEvent,
+    ObservationEvent,
+    SystemPromptEvent,
+)
 from openhands.core.llm import LLM, Message, TextContent, get_llm_metadata
 from openhands.core.logger import get_logger
-from openhands.core.tool import BUILT_IN_TOOLS, ActionBase, FinishTool, ObservationBase, Tool
+from openhands.core.tool import (
+    BUILT_IN_TOOLS,
+    ActionBase,
+    FinishTool,
+    ObservationBase,
+    Tool,
+)

 from ..base import AgentBase

@@ -32,10 +45,18 @@ def __init__(
         cli_mode: bool = True,
     ) -> None:
         for tool in BUILT_IN_TOOLS:
-            assert tool not in tools, f"{tool} is automatically included and should not be provided."
+            assert tool not in tools, (
+                f"{tool} is automatically included and should not be provided."
+            )
         super().__init__(llm=llm, tools=tools + BUILT_IN_TOOLS, env_context=env_context)

-        self.system_message: TextContent = TextContent(text=render_system_message(prompt_dir=self.prompt_dir, system_prompt_filename=system_prompt_filename, cli_mode=cli_mode))
+        self.system_message: TextContent = TextContent(
+            text=render_system_message(
+                prompt_dir=self.prompt_dir,
+                system_prompt_filename=system_prompt_filename,
+                cli_mode=cli_mode,
+            )
+        )

         self.max_iterations: int = 10

@@ -44,11 +65,16 @@ def init_state(
         state: ConversationState,
         on_event: ConversationCallbackType,
     ) -> None:
-        # TODO(openhands): we should add test to test this init_state will actually modify state in-place
+        # TODO(openhands): we should add test to test this init_state will actually
+        # modify state in-place
        messages = [e.to_llm_message() for e in state.events]
        if len(messages) == 0:
            # Prepare system message
-            event = SystemPromptEvent(source="agent", system_prompt=self.system_message, tools=[t.to_openai_tool() for t in self.tools.values()])
+            event = SystemPromptEvent(
+                source="agent",
+                system_prompt=self.system_message,
+                tools=[t.to_openai_tool() for t in self.tools.values()],
+            )
            on_event(event)

     def step(
@@ -57,13 +83,22 @@ def step(
         on_event: ConversationCallbackType,
     ) -> None:
         # Get LLM Response (Action)
-        llm_convertible_events = cast(list[LLMConvertibleEvent], [e for e in state.events if isinstance(e, LLMConvertibleEvent)])
-        _messages = self.llm.format_messages_for_llm(LLMConvertibleEvent.events_to_messages(llm_convertible_events))
+        llm_convertible_events = cast(
+            list[LLMConvertibleEvent],
+            [e for e in state.events if isinstance(e, LLMConvertibleEvent)],
+        )
+        _messages = self.llm.format_messages_for_llm(
+            LLMConvertibleEvent.events_to_messages(llm_convertible_events)
+        )
        logger.debug(f"Sending messages to LLM: {json.dumps(_messages, indent=2)}")
        response: ModelResponse = self.llm.completion(
            messages=_messages,
            tools=[tool.to_openai_tool() for tool in self.tools.values()],
-            extra_body={"metadata": get_llm_metadata(model_name=self.llm.config.model, agent_name=self.name)},
+            extra_body={
+                "metadata": get_llm_metadata(
+                    model_name=self.llm.config.model, agent_name=self.name
+                )
+            },
        )
        assert len(response.choices) == 1 and isinstance(response.choices[0], Choices)
        llm_message: LiteLLMMessage = response.choices[0].message  # type: ignore
@@ -72,12 +107,24 @@ def step(
         if message.tool_calls and len(message.tool_calls) > 0:
             tool_call: ChatCompletionMessageToolCall
             if any(tc.type != "function" for tc in message.tool_calls):
-                logger.warning("LLM returned tool calls but some are not of type 'function' - ignoring those")
+                logger.warning(
+                    "LLM returned tool calls but some are not of type 'function' - "
+                    "ignoring those"
+                )

-            tool_calls = [tool_call for tool_call in message.tool_calls if tool_call.type == "function"]
-            assert len(tool_calls) > 0, "LLM returned tool calls but none are of type 'function'"
+            tool_calls = [
+                tool_call
+                for tool_call in message.tool_calls
+                if tool_call.type == "function"
+            ]
+            assert len(tool_calls) > 0, (
+                "LLM returned tool calls but none are of type 'function'"
+            )
            if not all(isinstance(c, TextContent) for c in message.content):
-                logger.warning("LLM returned tool calls but message content is not all TextContent - ignoring non-text content")
+                logger.warning(
+                    "LLM returned tool calls but message content is not all "
+                    "TextContent - ignoring non-text content"
+                )

            # Generate unique batch ID for this LLM response
            thought_content = [c for c in message.content if isinstance(c, TextContent)]
@@ -89,7 +136,9 @@ def step(
                     tool_call,
                     llm_response_id=response.id,
                     on_event=on_event,
-                    thought=thought_content if i == 0 else [],  # Only first gets thought
+                    thought=thought_content
+                    if i == 0
+                    else [],  # Only first gets thought
                 )
                 if action_event is None:
                     continue
@@ -130,34 +179,62 @@ def _get_action_events(

         # Validate arguments
         try:
-            action: ActionBase = tool.action_type.model_validate(json.loads(tool_call.function.arguments))
+            action: ActionBase = tool.action_type.model_validate(
+                json.loads(tool_call.function.arguments)
+            )
        except (json.JSONDecodeError, ValidationError) as e:
-            err = f"Error validating args {tool_call.function.arguments} for tool '{tool.name}': {e}"
+            err = (
+                f"Error validating args {tool_call.function.arguments} for tool "
+                f"'{tool.name}': {e}"
+            )
            event = AgentErrorEvent(error=err)
            on_event(event)
            return

        # Create one ActionEvent per action
-        action_event = ActionEvent(action=action, thought=thought, tool_name=tool.name, tool_call_id=tool_call.id, tool_call=tool_call, llm_response_id=llm_response_id)
+        action_event = ActionEvent(
+            action=action,
+            thought=thought,
+            tool_name=tool.name,
+            tool_call_id=tool_call.id,
+            tool_call=tool_call,
+            llm_response_id=llm_response_id,
+        )
        on_event(action_event)
        return action_event

-    def _execute_action_events(self, state: ConversationState, action_event: ActionEvent, on_event: ConversationCallbackType):
+    def _execute_action_events(
+        self,
+        state: ConversationState,
+        action_event: ActionEvent,
+        on_event: ConversationCallbackType,
+    ):
        """Execute action events and update the conversation state.

-        It will call the tool's executor and update the state & call callback fn with the observation.
+        It will call the tool's executor and update the state & call callback fn
+        with the observation.
        """
        tool = self.tools.get(action_event.tool_name, None)
        if tool is None:
-            raise RuntimeError(f"Tool '{action_event.tool_name}' not found. This should not happen as it was checked earlier.")
+            raise RuntimeError(
+                f"Tool '{action_event.tool_name}' not found. This should not happen "
+                "as it was checked earlier."
+            )

        # Execute actions!
        if tool.executor is None:
            raise RuntimeError(f"Tool '{tool.name}' has no executor")
        observation: ObservationBase = tool.executor(action_event.action)
-        assert isinstance(observation, ObservationBase), f"Tool '{tool.name}' executor must return an ObservationBase"
+        assert isinstance(observation, ObservationBase), (
+            f"Tool '{tool.name}' executor must return an ObservationBase"
+        )

-        obs_event = ObservationEvent(observation=observation, action_id=action_event.id, tool_name=tool.name, tool_call_id=action_event.tool_call.id)
+        obs_event = ObservationEvent(
+            observation=observation,
+            action_id=action_event.id,
+            tool_name=tool.name,
+            tool_call_id=action_event.tool_call.id,
+        )
        on_event(obs_event)

        # Set conversation state
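Several hunks above wrap long assert messages by parenthesizing only the message, which preserves `assert <condition>, <message>` semantics. A quick standalone sketch (not from the repo) of why that exact shape matters:

flag = True
# Safe wrapping, as in the diff: the parentheses group only the message string,
# so the statement is still `assert <condition>, <message>`.
assert flag, (
    "a long explanatory message can be wrapped across lines without changing "
    "the assert's meaning"
)
# Classic pitfall the formatter avoids: parenthesizing the *whole* statement
# creates a two-element tuple, which is always truthy and can never fail
# (left commented out on purpose):
# assert (flag, "this tuple is truthy, so the assertion never fires")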

openhands/core/config/llm_config.py

Lines changed: 15 additions & 6 deletions
@@ -46,7 +46,7 @@ class LLMConfig(BaseModel):
         reasoning_effort: The effort to put into reasoning. This is a string that can be one of 'low', 'medium', 'high', or 'none'. Can apply to all reasoning models.
         seed: The seed to use for the LLM.
         safety_settings: Safety settings for models that support them (like Mistral AI and Gemini).
-    """
+    """  # noqa: E501

     model: str = Field(default="claude-sonnet-4-20250514")
     api_key: SecretStr | None = Field(default=None)
@@ -63,7 +63,9 @@ class LLMConfig(BaseModel):
     retry_min_wait: int = Field(default=8)
     retry_max_wait: int = Field(default=64)
     timeout: int | None = Field(default=None)
-    max_message_chars: int = Field(default=30_000)  # maximum number of characters in an observation's content when sent to the llm
+    max_message_chars: int = Field(
+        default=30_000
+    )  # maximum number of characters in an observation's content when sent to the llm
     temperature: float = Field(default=0.0)
     top_p: float = Field(default=1.0)
     top_k: float | None = Field(default=None)
@@ -81,20 +83,25 @@ class LLMConfig(BaseModel):
     disable_stop_word: bool | None = Field(default=False)
     caching_prompt: bool = Field(default=True)
     log_completions: bool = Field(default=False)
-    log_completions_folder: str = Field(default=os.path.join(ENV_LOG_DIR, "completions"))
+    log_completions_folder: str = Field(
+        default=os.path.join(ENV_LOG_DIR, "completions")
+    )
     custom_tokenizer: str | None = Field(default=None)
     native_tool_calling: bool | None = Field(default=None)
     reasoning_effort: str | None = Field(default=None)
     seed: int | None = Field(default=None)
     safety_settings: list[dict[str, str]] | None = Field(
         default=None,
-        description="Safety settings for models that support them (like Mistral AI and Gemini)",
+        description=(
+            "Safety settings for models that support them (like Mistral AI and Gemini)"
+        ),
     )

     model_config = ConfigDict(extra="forbid")

     def model_post_init(self, __context: Any) -> None:
-        """Post-initialization hook to assign OpenRouter-related variables to environment variables.
+        """Post-initialization hook to assign OpenRouter-related variables to
+        environment variables.

         This ensures that these values are accessible to litellm at runtime.
         """
@@ -121,6 +128,8 @@ def model_post_init(self, __context: Any) -> None:
         if self.aws_access_key_id:
             os.environ["AWS_ACCESS_KEY_ID"] = self.aws_access_key_id.get_secret_value()
         if self.aws_secret_access_key:
-            os.environ["AWS_SECRET_ACCESS_KEY"] = self.aws_secret_access_key.get_secret_value()
+            os.environ["AWS_SECRET_ACCESS_KEY"] = (
+                self.aws_secret_access_key.get_secret_value()
+            )
         if self.aws_region_name:
             os.environ["AWS_REGION_NAME"] = self.aws_region_name
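Note the first hunk: instead of rewrapping the long attribute descriptions in the class docstring, the commit appends `# noqa: E501` after the closing quotes. The repo's linter config is not part of this excerpt, but ruff (a common choice, and one that uses the E501 code) honors a noqa directive placed at the end of a multi-line string, suppressing violations anywhere inside it. A minimal sketch of that pattern, under that assumption:

# Assumption: the linter (e.g., ruff) applies a noqa directive on the closing
# line of a multi-line string to the whole string; flake8 checks physical lines
# and behaves differently.
def example() -> None:
    """A docstring with one deliberately long line that would otherwise trip the E501 line-length rule at 88 characters.
    """  # noqa: E501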

openhands/core/config/mcp_config.py

Lines changed: 35 additions & 10 deletions
@@ -89,7 +89,10 @@ def validate_server_name(cls, v: str) -> str:

         # Check for valid characters (alphanumeric, hyphens, underscores)
         if not re.match(r"^[a-zA-Z0-9_-]+$", v):
-            raise ValueError("Server name can only contain letters, numbers, hyphens, and underscores")
+            raise ValueError(
+                "Server name can only contain letters, numbers, hyphens, and "
+                "underscores"
+            )

         return v

@@ -104,7 +107,10 @@ def validate_command(cls, v: str) -> str:

         # Check that command doesn't contain spaces (should be a single executable)
         if " " in v:
-            raise ValueError("Command should be a single executable without spaces (use arguments field for parameters)")
+            raise ValueError(
+                "Command should be a single executable without spaces (use "
+                "arguments field for parameters)"
+            )

         return v

@@ -131,7 +137,10 @@ def parse_args(cls, v) -> list[str]:
                 return shlex.split(v)
             except ValueError as e:
                 # If shlex parsing fails (e.g., unmatched quotes), provide clear error
-                raise ValueError(f'Invalid argument format: {str(e)}. Use shell-like format, e.g., "arg1 arg2" or \'--config "value with spaces"\'')
+                raise ValueError(
+                    f"Invalid argument format: {str(e)}. Use shell-like format, "
+                    f'e.g., "arg1 arg2" or \'--config "value with spaces"\''
+                )

         return v or []

@@ -150,14 +159,20 @@ def parse_env(cls, v) -> dict[str, str]:
                 continue

             if "=" not in pair:
-                raise ValueError(f"Environment variable '{pair}' must be in KEY=VALUE format")
+                raise ValueError(
+                    f"Environment variable '{pair}' must be in KEY=VALUE format"
+                )

             key, value = pair.split("=", 1)
             key = key.strip()
             if not key:
                 raise ValueError("Environment variable key cannot be empty")
             if not re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", key):
-                raise ValueError(f"Invalid environment variable name '{key}'. Must start with letter or underscore, contain only alphanumeric characters and underscores")
+                raise ValueError(
+                    f"Invalid environment variable name '{key}'. Must start with "
+                    f"letter or underscore, contain only alphanumeric characters "
+                    f"and underscores"
+                )

             env[key] = value
         return env

@@ -172,7 +187,12 @@ def __eq__(self, other):
         """
         if not isinstance(other, MCPStdioServerConfig):
             return False
-        return self.name == other.name and self.command == other.command and self.args == other.args and set(self.env.items()) == set(other.env.items())
+        return (
+            self.name == other.name
+            and self.command == other.command
+            and self.args == other.args
+            and set(self.env.items()) == set(other.env.items())
+        )


 class MCPSHTTPServerConfig(BaseModel):

@@ -191,7 +211,8 @@ class MCPConfig(BaseModel):

     Attributes:
         sse_servers: List of MCP SSE server configs
-        stdio_servers: List of MCP stdio server configs. These servers will be added to the MCP Router running inside runtime container.
+        stdio_servers: List of MCP stdio server configs. These servers will be
+            added to the MCP Router running inside runtime container.
         shttp_servers: List of MCP HTTP server configs.
     """

@@ -242,12 +263,14 @@ def validate_servers(self) -> None:

     @classmethod
     def from_toml_section(cls, data: dict) -> dict[str, "MCPConfig"]:
-        """Create a mapping of MCPConfig instances from a toml dictionary representing the [mcp] section.
+        """Create a mapping of MCPConfig instances from a toml dictionary
+        representing the [mcp] section.

         The configuration is built from all keys in data.

         Returns:
-            dict[str, MCPConfig]: A mapping where the key "mcp" corresponds to the [mcp] configuration
+            dict[str, MCPConfig]: A mapping where the key "mcp" corresponds to the
+                [mcp] configuration
        """
        # Initialize the result mapping
        mcp_mapping: dict[str, MCPConfig] = {}
@@ -256,7 +279,9 @@ def from_toml_section(cls, data: dict) -> dict[str, "MCPConfig"]:
        # Convert all entries in sse_servers to MCPSSEServerConfig objects
        if "sse_servers" in data:
            data["sse_servers"] = cls._normalize_servers(data["sse_servers"])
-            servers: list[MCPSSEServerConfig | MCPStdioServerConfig | MCPSHTTPServerConfig] = []
+            servers: list[
+                MCPSSEServerConfig | MCPStdioServerConfig | MCPSHTTPServerConfig
+            ] = []
            for server in data["sse_servers"]:
                servers.append(MCPSSEServerConfig(**server))
            data["sse_servers"] = servers