Build AI agents
you can actually steer.
Any model. Any cloud. Open source for Python and TypeScript.
pip install strands-agents
npm install @strands-agents/sdk

from strands import Agent, tool
# Expose a plain function to the agent as a tool via the @tool decorator;
# the docstring becomes the tool description shown to the model.
@tool
def weather(city: str) -> dict:
    """Get current weather for a city."""
    # fetch_weather is assumed to be defined elsewhere -- TODO confirm.
    return fetch_weather(city)

agent = Agent(tools=[weather])
agent("What's the weather in Seattle?")

from strands import Agent, tool
# Duplicate copy of the weather-tool snippet (responsive layout artifact),
# reformatted with valid Python indentation.
@tool
def weather(city: str) -> dict:
    """Get current weather for a city."""
    # fetch_weather is assumed to be defined elsewhere -- TODO confirm.
    return fetch_weather(city)

agent = Agent(tools=[weather])
agent("What's the weather in Seattle?")

Write code, not pipelines
Early agent frameworks wrapped models in orchestration logic because models couldn't reason reliably. Now they can. Strands gives you back control: define your tools as functions, write a system prompt, and the agent loop handles execution. No step definitions, no workflow graphs. Just code.
from strands import Agent, tool
from strands_tools import http_request
from pathlib import Path


@tool
def save_report(title: str, content: str) -> str:
    """Save a research report to disk."""
    # NOTE(review): assumes a reports/ directory already exists -- confirm.
    Path(f"reports/{title}.md").write_text(content)
    return f"Saved {title}.md"


# The model decides how to classify, research, and draft
agent = Agent(
    system_prompt="""You are a research assistant. Classify the topic,
research it using the web, then save a summary report.""",
    tools=[http_request, save_report],
)
agent("Summarize recent AI agent papers")

from strands import Agent, tool
from strands_tools import http_request
from pathlib import Path


@tool
def save_report(title: str, content: str) -> str:
    """Save a research report to disk."""
    # NOTE(review): assumes a reports/ directory already exists -- confirm.
    Path(f"reports/{title}.md").write_text(content)
    return f"Saved {title}.md"


# The model decides how to classify, research, and draft
agent = Agent(
    system_prompt="""You are a research assistant. Classify the topic,
research it using the web, then save a summary report.""",
    tools=[http_request, save_report],
)
agent("Summarize recent AI agent papers")

The model handles orchestration. When it makes a mistake, a plugin handles recovery. Your agent code stays the same.
Your agent ignored your instructions again
You wrote the rules. The model skipped them. Longer prompts make it worse: by line 40, the model is guessing which instructions still matter. Hard-coded workflows are the other extreme: predictable but brittle, and they strip away the reasoning that makes agents useful.
agent = Agent(
system_prompt="""You are a report generator. Always use markdown
tables for comparisons. Never use bullet lists for data. Format
currency as $X,XXX.XX. Include a summary section at the top.
When comparing more than 3 items, split into sub-tables.
Use ISO 8601 dates. Cite sources with inline links.
If the user asks about competitors, stay neutral.
Never share internal pricing..."""
# The model will follow some of these. Guess which ones.
)

agent = Agent(
system_prompt="""You are a report generator. Always use markdown
tables for comparisons. Never use bullet lists for data. Format
currency as $X,XXX.XX. Include a summary section at the top.
When comparing more than 3 items, split into sub-tables.
Use ISO 8601 dates. Cite sources with inline links.
If the user asks about competitors, stay neutral.
Never share internal pricing..."""
# The model will follow some of these. Guess which ones.
)

Middleware for the agent loop
Steering hooks intercept the agent loop the same way middleware intercepts HTTP requests. Before a tool call, check the inputs. After a model response, validate the output. Each handler is a Python function you can read, test, and debug.
from strands import Agent
from strands.vended_plugins.steering import (
    SteeringHandler, ToolSteeringAction,
)


class NoPricingLeaks(SteeringHandler):
    """Steering hook that blocks emails containing internal pricing."""

    async def steer_before_tool(self, *, agent, tool_use, **kwargs):
        # Inspect every send_email tool call before it executes.
        if tool_use["name"] == "send_email":
            if "internal pricing" in str(tool_use["input"]):
                # Guide the model to redact instead of hard-failing the call.
                return ToolSteeringAction.guide(
                    "Contains internal pricing. Redact before sending."
                )
        # All other tool calls proceed untouched.
        return ToolSteeringAction.proceed()
agent = Agent(
    tools=[send_email, generate_report],
    plugins=[NoPricingLeaks()],
)

from strands import Agent
from strands.vended_plugins.steering import (
    SteeringHandler, ToolSteeringAction,
)


class NoPricingLeaks(SteeringHandler):
    """Steering hook that blocks emails containing internal pricing."""

    async def steer_before_tool(self, *, agent, tool_use, **kwargs):
        # Inspect every send_email tool call before it executes.
        if tool_use["name"] == "send_email":
            if "internal pricing" in str(tool_use["input"]):
                # Guide the model to redact instead of hard-failing the call.
                return ToolSteeringAction.guide(
                    "Contains internal pricing. Redact before sending."
                )
        # All other tool calls proceed untouched.
        return ToolSteeringAction.proceed()
agent = Agent(
    tools=[send_email, generate_report],
    plugins=[NoPricingLeaks()],
)

Everything you need to build agents
Any Model Provider
Bedrock, OpenAI, Anthropic, Ollama, LiteLLM. Swap providers with a single line. Your agent code doesn't change.
# Swap providers in one line
from strands.models.openai import OpenAIModel

agent = Agent(model=OpenAIModel(
    model_id="gpt-4o"
))

# Swap providers in one line
from strands.models.openai import OpenAIModel

agent = Agent(model=OpenAIModel(
    model_id="gpt-4o"
))

Tools from Any Function
Turn any function into an agent tool with @tool. The docstring becomes the LLM's tool description. No schema files, no registration boilerplate.
# Any function becomes a tool
@tool
def search_db(query: str) -> list:
    """Search the product database."""
    # db is assumed to be an existing database handle -- TODO confirm.
    return db.search(query)

# Any function becomes a tool
@tool
def search_db(query: str) -> list:
    """Search the product database."""
    return db.search(query)

Native MCP Support
Connect to any MCP server. Use thousands of community tools without writing integration code.
# Connect to any MCP server
from strands.tools.mcp import MCPClient
from mcp import stdio_client, StdioServerParameters

# The factory lambda spawns the server over stdio when the client connects.
mcp = MCPClient(lambda: stdio_client(
    StdioServerParameters(
        command="uvx",
        args=["my-mcp-server"],
    )
))

# Connect to any MCP server
from strands.tools.mcp import MCPClient
from mcp import stdio_client, StdioServerParameters

mcp = MCPClient(lambda: stdio_client(
    StdioServerParameters(
        command="uvx",
        args=["my-mcp-server"],
    )
))

Multi-Agent Systems
Compose agents with graphs, swarms, workflows, or simple agent-as-tool patterns. Built-in A2A protocol support for distributed systems.
# Agents as tools for other agents
@tool
def research(query: str) -> str:
    """Research a topic thoroughly."""
    # A nested agent runs its own loop; its final answer is returned as text.
    agent = Agent(tools=[search_web])
    return str(agent(query))

writer = Agent(tools=[research])
writer("Write a post about AI agents")

# Agents as tools for other agents
# Duplicate copy of the agent-as-tool snippet, reformatted.
@tool
def research(query: str) -> str:
    """Research a topic thoroughly."""
    # A nested agent runs its own loop; its final answer is returned as text.
    agent = Agent(tools=[search_web])
    return str(agent(query))

writer = Agent(tools=[research])
writer("Write a post about AI agents")

Conversation Memory
Sliding window, summarization, and session persistence out of the box. Manage context across long conversations without manual token counting.
# Manage context automatically
from strands.agent.conversation_manager import (
    SlidingWindowConversationManager,
)

agent = Agent(
    conversation_manager=SlidingWindowConversationManager(
        window_size=5
    ),
)

# Manage context automatically
from strands.agent.conversation_manager import (
    SlidingWindowConversationManager,
)

agent = Agent(
    conversation_manager=SlidingWindowConversationManager(
        window_size=5
    ),
)

Built-in Observability
OpenTelemetry traces, metrics, and logs with no extra instrumentation. See every tool call, model invocation, and token count.
# Traces with zero config
from strands import Agent

agent = Agent(trace_attributes={
    "service": "my-app",
    "env": "production",
})

# Traces with zero config
# Duplicate copy of the observability snippet, reformatted.
from strands import Agent

agent = Agent(trace_attributes={
    "service": "my-app",
    "env": "production",
})