
Commit f1854c1

Merge branch 'strands-agents:main' into pruning
2 parents: 9cba45f + 73865d3

File tree

161 files changed, +13688 -951 lines


.codecov.yml

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
+coverage:
+  status:
+    project:
+      default:
+        target: 90% # overall coverage threshold
+    patch:
+      default:
+        target: 90% # patch coverage threshold
+        base: auto
+        # Only post patch coverage on decreases
+        only_pulls: true

.github/workflows/auto-close.yml

Lines changed: 5 additions & 0 deletions
@@ -11,6 +11,11 @@ on:
         default: 'false'
         type: boolean

+permissions:
+  contents: read
+  issues: write
+  pull-requests: write
+
 jobs:
   auto-close:
     runs-on: ubuntu-latest
Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
+name: PR Size Labeler
+
+on:
+  pull_request_target:
+    branches: main
+
+jobs:
+  label-size:
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+      issues: write
+    steps:
+      - name: Calculate PR size and apply label
+        uses: actions/github-script@v8
+        with:
+          script: |
+            const pr = context.payload.pull_request;
+            const totalChanges = pr.additions + pr.deletions;
+
+            // Remove existing size labels
+            const labels = await github.rest.issues.listLabelsOnIssue({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.payload.pull_request.number
+            });
+
+            for (const label of labels.data) {
+              if (label.name.startsWith('size/')) {
+                await github.rest.issues.removeLabel({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  issue_number: context.payload.pull_request.number,
+                  name: label.name
+                });
+              }
+            }

+            // Determine and apply new size label
+            let sizeLabel;
+            if (totalChanges <= 20) sizeLabel = 'size/xs';
+            else if (totalChanges <= 100) sizeLabel = 'size/s';
+            else if (totalChanges <= 500) sizeLabel = 'size/m';
+            else if (totalChanges <= 1000) sizeLabel = 'size/l';
+            else {
+              sizeLabel = 'size/xl';
+            }
+
+            await github.rest.issues.addLabels({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              issue_number: context.payload.pull_request.number,
+              labels: [sizeLabel]
+            });
+
+            if (sizeLabel === 'size/xl') {
+              core.setFailed(`PR is too large (${totalChanges} lines). Please split into smaller PRs.`);
+            }
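For reference, the script above maps total changed lines (additions + deletions) to labels at the 20/100/500/1000 boundaries and fails the check for anything larger. Below is a minimal Python sketch of the same mapping, purely illustrative and not part of the workflow; the helper name pr_size_label is hypothetical.

def pr_size_label(total_changes: int) -> str:
    """Mirror the size thresholds used by the workflow above (illustrative only)."""
    if total_changes <= 20:
        return "size/xs"
    if total_changes <= 100:
        return "size/s"
    if total_changes <= 500:
        return "size/m"
    if total_changes <= 1000:
        return "size/l"
    return "size/xl"  # the workflow also fails the check at this size


print(pr_size_label(13688 + 951))  # this commit's own totals would land on size/xl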

.github/workflows/test-lint.yml

Lines changed: 6 additions & 1 deletion
@@ -66,6 +66,11 @@ jobs:
         id: tests
         run: hatch test tests --cover
         continue-on-error: false
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v5
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
   lint:
     name: Lint
     runs-on: ubuntu-latest
@@ -90,5 +95,5 @@ jobs:

       - name: Run lint
         id: lint
-        run: hatch run test-lint
+        run: hatch fmt --linter --check
         continue-on-error: false

.gitignore

Lines changed: 2 additions & 1 deletion
@@ -11,4 +11,5 @@ __pycache__*
 .vscode
 dist
 repl_state
-.kiro
+.kiro
+uv.lock

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions
@@ -3,14 +3,14 @@ repos:
     hooks:
       - id: hatch-format
         name: Format code
-        entry: hatch run test-format
+        entry: hatch fmt --formatter --check
         language: system
         pass_filenames: false
        types: [python]
         stages: [pre-commit]
       - id: hatch-lint
         name: Lint code
-        entry: hatch run test-lint
+        entry: hatch fmt --linter --check
         language: system
         pass_filenames: false
         types: [python]

CONTRIBUTING.md

Lines changed: 12 additions & 0 deletions
@@ -36,6 +36,18 @@ Before starting work on any issue:
 3. Wait for maintainer confirmation before beginning significant work


+## Development Tenets
+Our team follows these core principles when designing and implementing features. These tenets help us make consistent decisions, resolve trade-offs, and maintain the quality and coherence of the SDK. When contributing, please consider how your changes align with these principles:
+
+1. **Simple at any scale:** We believe that simple things should be simple. The same clean abstractions that power a weekend prototype should scale effortlessly to production workloads. We reject the notion that enterprise-grade means enterprise-complicated - Strands remains approachable whether it's your first agent or your millionth.
+2. **Extensible by design:** We allow for as much configuration as possible, from hooks to model providers, session managers, tools, etc. We meet customers where they are with flexible extension points that are simple to integrate with.
+3. **Composability:** Primitives are building blocks with each other. Each feature of Strands is developed with all other features in mind, they are consistent and complement one another.
+4. **The obvious path is the happy path:** Through intuitive naming, helpful error messages, and thoughtful API design, we guide developers toward correct patterns and away from common pitfalls.
+5. **We are accessible to humans and agents:** Strands is designed for both humans and AI to understand equally well. We don’t take shortcuts on curated DX for humans and we go the extra mile to make sure coding assistants can help you use those interfaces the right way.
+6. **Embrace common standards:** We respect what came before, and do not want to reinvent something that is already widely adopted or done better.
+
+When proposing solutions or reviewing code, we reference these principles to guide our decisions. If two approaches seem equally valid, we choose the one that best aligns with our tenets.
+
 ## Development Environment

 This project uses [hatchling](https://hatch.pypa.io/latest/build/#hatchling) as the build backend and [hatch](https://hatch.pypa.io/latest/) for development workflow management.
README.md

Lines changed: 18 additions & 2 deletions
@@ -37,7 +37,7 @@ Strands Agents is a simple yet powerful SDK that takes a model-driven approach t
 ## Feature Overview

 - **Lightweight & Flexible**: Simple agent loop that just works and is fully customizable
-- **Model Agnostic**: Support for Amazon Bedrock, Anthropic, LiteLLM, Llama, Ollama, OpenAI, Writer, and custom providers
+- **Model Agnostic**: Support for Amazon Bedrock, Anthropic, Gemini, LiteLLM, Llama, Ollama, OpenAI, Writer, and custom providers
 - **Advanced Capabilities**: Multi-agent systems, autonomous agents, and streaming support
 - **Built-in MCP**: Native support for Model Context Protocol (MCP) servers, enabling access to thousands of pre-built tools

@@ -129,6 +129,7 @@ from strands import Agent
 from strands.models import BedrockModel
 from strands.models.ollama import OllamaModel
 from strands.models.llamaapi import LlamaAPIModel
+from strands.models.gemini import GeminiModel
 from strands.models.llamacpp import LlamaCppModel

 # Bedrock
@@ -140,6 +141,17 @@ bedrock_model = BedrockModel(
 agent = Agent(model=bedrock_model)
 agent("Tell me about Agentic AI")

+# Google Gemini
+gemini_model = GeminiModel(
+    client_args={
+        "api_key": "your_gemini_api_key",
+    },
+    model_id="gemini-2.5-flash",
+    params={"temperature": 0.7}
+)
+agent = Agent(model=gemini_model)
+agent("Tell me about Agentic AI")
+
 # Ollama
 ollama_model = OllamaModel(
     host="http://localhost:11434",
@@ -159,12 +171,16 @@ response = agent("Tell me about Agentic AI")
 Built-in providers:
 - [Amazon Bedrock](https://strandsagents.com/latest/user-guide/concepts/model-providers/amazon-bedrock/)
 - [Anthropic](https://strandsagents.com/latest/user-guide/concepts/model-providers/anthropic/)
+- [Gemini](https://strandsagents.com/latest/user-guide/concepts/model-providers/gemini/)
+- [Cohere](https://strandsagents.com/latest/user-guide/concepts/model-providers/cohere/)
 - [LiteLLM](https://strandsagents.com/latest/user-guide/concepts/model-providers/litellm/)
 - [llama.cpp](https://strandsagents.com/latest/user-guide/concepts/model-providers/llamacpp/)
 - [LlamaAPI](https://strandsagents.com/latest/user-guide/concepts/model-providers/llamaapi/)
+- [MistralAI](https://strandsagents.com/latest/user-guide/concepts/model-providers/mistral/)
 - [Ollama](https://strandsagents.com/latest/user-guide/concepts/model-providers/ollama/)
 - [OpenAI](https://strandsagents.com/latest/user-guide/concepts/model-providers/openai/)
-- [Writer](https://strandsagents.com/latest/documentation/docs/user-guide/concepts/model-providers/writer/)
+- [SageMaker](https://strandsagents.com/latest/user-guide/concepts/model-providers/sagemaker/)
+- [Writer](https://strandsagents.com/latest/user-guide/concepts/model-providers/writer/)

 Custom providers can be implemented using [Custom Providers](https://strandsagents.com/latest/user-guide/concepts/model-providers/custom_model_provider/)
pyproject.toml

Lines changed: 10 additions & 7 deletions
@@ -30,6 +30,7 @@ dependencies = [
     "boto3>=1.26.0,<2.0.0",
     "botocore>=1.29.0,<2.0.0",
     "docstring_parser>=0.15,<1.0",
+    "jsonschema>=4.0.0,<5.0.0",
     "mcp>=1.11.0,<2.0.0",
     "pydantic>=2.4.0,<3.0.0",
     "typing-extensions>=4.13.2,<5.0.0",
@@ -42,7 +43,8 @@ dependencies = [

 [project.optional-dependencies]
 anthropic = ["anthropic>=0.21.0,<1.0.0"]
-litellm = ["litellm>=1.75.9,<2.0.0", "openai>=1.68.0,<1.108.0"]
+gemini = ["google-genai>=1.32.0,<2.0.0"]
+litellm = ["litellm>=1.75.9,<2.0.0", "openai>=1.68.0,<1.110.0"]
 llamaapi = ["llama-api-client>=0.1.0,<1.0.0"]
 mistral = ["mistralai>=1.8.2"]
 ollama = ["ollama>=0.4.8,<1.0.0"]
@@ -54,9 +56,9 @@ sagemaker = [
 ]
 otel = ["opentelemetry-exporter-otlp-proto-http>=1.30.0,<2.0.0"]
 docs = [
-    "sphinx>=5.0.0,<6.0.0",
+    "sphinx>=5.0.0,<9.0.0",
     "sphinx-rtd-theme>=1.0.0,<2.0.0",
-    "sphinx-autodoc-typehints>=1.12.0,<2.0.0",
+    "sphinx-autodoc-typehints>=1.12.0,<4.0.0",
 ]

 a2a = [
@@ -67,7 +69,7 @@ a2a = [
     "fastapi>=0.115.12,<1.0.0",
     "starlette>=0.46.2,<1.0.0",
 ]
-all = ["strands-agents[a2a,anthropic,docs,litellm,llamaapi,mistral,ollama,openai,writer,sagemaker,otel]"]
+all = ["strands-agents[a2a,anthropic,docs,gemini,litellm,llamaapi,mistral,ollama,openai,writer,sagemaker,otel]"]

 dev = [
     "commitizen>=4.4.0,<5.0.0",
@@ -77,7 +79,7 @@ dev = [
     "pre-commit>=3.2.0,<4.4.0",
     "pytest>=8.0.0,<9.0.0",
     "pytest-cov>=7.0.0,<8.0.0",
-    "pytest-asyncio>=1.0.0,<1.2.0",
+    "pytest-asyncio>=1.0.0,<1.3.0",
     "pytest-xdist>=3.0.0,<4.0.0",
     "ruff>=0.13.0,<0.14.0",
 ]
@@ -130,7 +132,7 @@ extra-args = ["-n", "auto", "-vv"]
 dependencies = [
     "pytest>=8.0.0,<9.0.0",
     "pytest-cov>=7.0.0,<8.0.0",
-    "pytest-asyncio>=1.0.0,<1.2.0",
+    "pytest-asyncio>=1.0.0,<1.3.0",
     "pytest-xdist>=3.0.0,<4.0.0",
     "moto>=5.1.0,<6.0.0",
 ]
@@ -169,7 +171,8 @@ test = "hatch test {args}"
 test-integ = "hatch test tests_integ {args}"

 prepare = [
-    "hatch run test-format",
+    "hatch run format",
+    "hatch run lint",
     "hatch run test-lint",
     "hatch test --all"
 ]
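As a usage note rather than part of the commit: the new gemini extra keeps the Google provider optional, so the provider import only succeeds once the extra (and its google-genai dependency) is installed. A minimal sketch, assuming the import path shown in the README change above; the try/except probing pattern is illustrative, not code from this commit.

# Probe for the optional Gemini provider added by the "gemini" extra.
try:
    from strands.models.gemini import GeminiModel
except ImportError:
    GeminiModel = None  # extra not installed; other providers remain available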

src/strands/__init__.py

Lines changed: 9 additions & 1 deletion
@@ -5,4 +5,12 @@
 from .tools.decorator import tool
 from .types.tools import ToolContext

-__all__ = ["Agent", "agent", "models", "tool", "types", "telemetry", "ToolContext"]
+__all__ = [
+    "Agent",
+    "agent",
+    "models",
+    "tool",
+    "ToolContext",
+    "types",
+    "telemetry",
+]
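For orientation, the names in the expanded __all__ are the package's public re-exports. A minimal sketch of how they are typically combined, extrapolated from the README snippet earlier in this diff; the word_count tool, the tools= argument, and the default model provider are assumptions not shown in this commit.

from strands import Agent, tool


@tool
def word_count(text: str) -> int:
    """Count the words in a piece of text."""
    return len(text.split())


# Assumes a configured model provider; credentials for the default provider
# would be needed for the call below to actually run.
agent = Agent(tools=[word_count])
agent("How many words are in 'hello agentic world'?")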
