参考数据:工具使用参考 — Python
Data: Tool use reference — Python
v2.1.63
Python tool use reference including tool runner, manual agentic loop, code execution, and structured outputs
工具使用 — Python
关于概念性概述(工具定义、工具选择、技巧),请参阅 shared/tool-use-concepts.md。
工具运行器(推荐)
Beta 版本: 工具运行器在 Python SDK 中处于 Beta 阶段。
使用 @beta_tool 装饰器将工具定义为类型化函数,然后将其传递给 client.beta.messages.tool_runner():
import anthropic
from anthropic import beta_tool
client = anthropic.Anthropic()
@beta_tool
def get_weather(location: str, unit: str = "celsius") -> str:
"""获取指定地点的当前天气。
Args:
location: 城市和州,例如:San Francisco, CA。
unit: 温度单位,可选 "celsius" 或 "fahrenheit"。
"""
# 你的实现代码
return f"72°F and sunny in {location}"
# 工具运行器自动处理 agentic 循环
runner = client.beta.messages.tool_runner(
model="{\{OPUS_ID}\}",
max_tokens=4096,
tools=[get_weather],
messages=[{"role": "user", "content": "What's the weather in Paris?"}],
)
# 每次迭代产生一个 BetaMessage;当 Claude 完成时迭代停止
for message in runner:
print(message)
对于异步使用,请将 @beta_async_tool 与 async def 函数一起使用。
工具运行器的主要优势:
- 无需手动循环 — SDK 处理调用工具和反馈结果
- 通过装饰器实现类型安全的工具输入
- 工具模式根据函数签名自动生成
- 当 Claude 不再调用工具时,迭代自动停止
手动 Agentic 循环
当你需要对循环进行细粒度控制时使用此方法(例如,自定义日志记录、条件性工具执行、人工介入审批):
import anthropic
client = anthropic.Anthropic()
tools = [...] # 你的工具定义
messages = [{"role": "user", "content": user_input}]
# Agentic 循环:持续进行直到 Claude 停止调用工具
while True:
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
tools=tools,
messages=messages
)
# 如果 Claude 已完成(不再调用工具),则中断
if response.stop_reason == "end_turn":
break
# 服务器端工具调用达到迭代限制;重新发送以继续
if response.stop_reason == "pause_turn":
messages = [
{"role": "user", "content": user_input},
{"role": "assistant", "content": response.content},
]
continue
# 从响应中提取 tool_use 块
tool_use_blocks = [b for b in response.content if b.type == "tool_use"]
# 追加 assistant 的响应(包括 tool_use 块)
messages.append({"role": "assistant", "content": response.content})
# 执行每个工具并收集结果
tool_results = []
for tool in tool_use_blocks:
result = execute_tool(tool.name, tool.input) # 你的实现
tool_results.append({
"type": "tool_result",
"tool_use_id": tool.id, # 必须与 tool_use 块的 id 匹配
"content": result
})
# 将工具结果作为用户消息追加
messages.append({"role": "user", "content": tool_results})
# 最终响应文本
final_text = next(b.text for b in response.content if b.type == "text")
处理工具结果
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[{"role": "user", "content": "What's the weather in Paris?"}]
)
for block in response.content:
if block.type == "tool_use":
tool_name = block.name
tool_input = block.input
tool_use_id = block.id
result = execute_tool(tool_name, tool_input)
followup = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[
{"role": "user", "content": "What's the weather in Paris?"},
{"role": "assistant", "content": response.content},
{
"role": "user",
"content": [{
"type": "tool_result",
"tool_use_id": tool_use_id,
"content": result
}]
}
]
)
多个工具调用
tool_results = []
for block in response.content:
if block.type == "tool_use":
result = execute_tool(block.name, block.input)
tool_results.append({
"type": "tool_result",
"tool_use_id": block.id,
"content": result
})
# 一次性发送所有结果
if tool_results:
followup = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[
*previous_messages,
{"role": "assistant", "content": response.content},
{"role": "user", "content": tool_results}
]
)
工具结果中的错误处理
tool_result = {
"type": "tool_result",
"tool_use_id": tool_use_id,
"content": "Error: Location 'xyz' not found. Please provide a valid city name.",
"is_error": True
}
工具选择
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
tool_choice={"type": "tool", "name": "get_weather"}, # 强制使用特定工具
messages=[{"role": "user", "content": "What's the weather in Paris?"}]
)
代码执行
基本用法
import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{
"role": "user",
"content": "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
}],
tools=[{
"type": "code_execution_20260120",
"name": "code_execution"
}]
)
for block in response.content:
if block.type == "text":
print(block.text)
elif block.type == "bash_code_execution_tool_result":
print(f"stdout: {block.content.stdout}")
上传文件进行分析
# 1. 上传文件
uploaded = client.beta.files.upload(file=open("sales_data.csv", "rb"))
# 2. 通过 container_upload 块传递给代码执行
# 代码执行是 GA 版本;Files API 仍处于 beta 阶段(通过 extra_headers 传递)
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
extra_headers={"anthropic-beta": "files-api-2025-04-14"},
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "Analyze this sales data. Show trends and create a visualization."},
{"type": "container_upload", "file_id": uploaded.id}
]
}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
检索生成的文件
import os
OUTPUT_DIR = "./claude_outputs"
os.makedirs(OUTPUT_DIR, exist_ok=True)
for block in response.content:
if block.type == "bash_code_execution_tool_result":
result = block.content
if result.type == "bash_code_execution_result" and result.content:
for file_ref in result.content:
if file_ref.type == "bash_code_execution_output":
metadata = client.beta.files.retrieve_metadata(file_ref.file_id)
file_content = client.beta.files.download(file_ref.file_id)
# 使用 basename 防止路径遍历;验证结果
safe_name = os.path.basename(metadata.filename)
if not safe_name or safe_name in (".", ".."):
print(f"Skipping invalid filename: {metadata.filename}")
continue
output_path = os.path.join(OUTPUT_DIR, safe_name)
file_content.write_to_file(output_path)
print(f"Saved: {output_path}")
容器复用
# 第一个请求:设置环境
response1 = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{"role": "user", "content": "Install tabulate and create data.json with sample data"}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
# 从响应中获取容器 ID
container_id = response1.container.id
# 第二个请求:复用同一个容器
response2 = client.messages.create(
container=container_id,
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{"role": "user", "content": "Read data.json and display as a formatted table"}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
响应结构
for block in response.content:
if block.type == "text":
print(block.text) # Claude 的解释
elif block.type == "server_tool_use":
print(f"Running: {block.name} - {block.input}") # Claude 正在做什么
elif block.type == "bash_code_execution_tool_result":
result = block.content
if result.type == "bash_code_execution_result":
if result.return_code == 0:
print(f"Output: {result.stdout}")
else:
print(f"Error: {result.stderr}")
else:
print(f"Tool error: {result.error_code}")
elif block.type == "text_editor_code_execution_tool_result":
print(f"File operation: {block.content}")
记忆工具
基本用法
import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=2048,
messages=[{"role": "user", "content": "Remember that my preferred language is Python."}],
tools=[{"type": "memory_20250818", "name": "memory"}],
)
SDK 记忆助手
继承 BetaAbstractMemoryTool:
from anthropic.lib.tools import BetaAbstractMemoryTool
class MyMemoryTool(BetaAbstractMemoryTool):
def view(self, command): ...
def create(self, command): ...
def str_replace(self, command): ...
def insert(self, command): ...
def delete(self, command): ...
def rename(self, command): ...
memory = MyMemoryTool()
# 与工具运行器一起使用
runner = client.beta.messages.tool_runner(
model="{\{OPUS_ID}\}",
max_tokens=2048,
tools=[memory],
messages=[{"role": "user", "content": "Remember my preferences"}],
)
for message in runner:
print(message)
有关完整实现示例,请使用 WebFetch:
https://github.com/anthropics/anthropic-sdk-python/blob/main/examples/memory/basic.py
结构化输出
JSON 输出(Pydantic — 推荐)
from pydantic import BaseModel
from typing import List
import anthropic
class ContactInfo(BaseModel):
name: str
email: str
plan: str
interests: List[str]
demo_requested: bool
client = anthropic.Anthropic()
response = client.messages.parse(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{
"role": "user",
"content": "Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo."
}],
output_format=ContactInfo,
)
# response.parsed_output 是一个经过验证的 ContactInfo 实例
contact = response.parsed_output
print(contact.name) # "Jane Doe"
print(contact.interests) # ["API", "SDKs"]
原始模式
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{
"role": "user",
"content": "Extract info: John Smith (john@example.com) wants the Enterprise plan."
}],
output_config={
"format": {
"type": "json_schema",
"schema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"email": {"type": "string"},
"plan": {"type": "string"},
"demo_requested": {"type": "boolean"}
},
"required": ["name", "email", "plan", "demo_requested"],
"additionalProperties": False
}
}
}
)
import json
data = json.loads(response.content[0].text)
严格工具使用
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{"role": "user", "content": "Book a flight to Tokyo for 2 passengers on March 15"}],
tools=[{
"name": "book_flight",
"description": "Book a flight to a destination",
"strict": True,
"input_schema": {
"type": "object",
"properties": {
"destination": {"type": "string"},
"date": {"type": "string", "format": "date"},
"passengers": {"type": "integer", "enum": [1, 2, 3, 4, 5, 6, 7, 8]}
},
"required": ["destination", "date", "passengers"],
"additionalProperties": False
}
}]
)
同时使用两者
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{"role": "user", "content": "Plan a trip to Paris next month"}],
output_config={
"format": {
"type": "json_schema",
"schema": {
"type": "object",
"properties": {
"summary": {"type": "string"},
"next_steps": {"type": "array", "items": {"type": "string"}\}
},
"required": ["summary", "next_steps"],
"additionalProperties": False
}
}
},
tools=[{
"name": "search_flights",
"description": "Search for available flights",
"strict": True,
"input_schema": {
"type": "object",
"properties": {
"destination": {"type": "string"},
"date": {"type": "string", "format": "date"}
},
"required": ["destination", "date"],
"additionalProperties": False
}
}]
)
英文原文 / English Original
Tool Use — Python
For conceptual overview (tool definitions, tool choice, tips), see shared/tool-use-concepts.md.
Tool Runner (Recommended)
Beta: The tool runner is in beta in the Python SDK.
Use the @beta_tool decorator to define tools as typed functions, then pass them to client.beta.messages.tool_runner():
import anthropic
from anthropic import beta_tool
client = anthropic.Anthropic()
@beta_tool
def get_weather(location: str, unit: str = "celsius") -> str:
"""Get current weather for a location.
Args:
location: City and state, e.g., San Francisco, CA.
unit: Temperature unit, either "celsius" or "fahrenheit".
"""
# Your implementation here
return f"72°F and sunny in {location}"
# The tool runner handles the agentic loop automatically
runner = client.beta.messages.tool_runner(
model="{\{OPUS_ID}\}",
max_tokens=4096,
tools=[get_weather],
messages=[{"role": "user", "content": "What's the weather in Paris?"}],
)
# Each iteration yields a BetaMessage; iteration stops when Claude is done
for message in runner:
print(message)
For async usage, use @beta_async_tool with async def functions.
Key benefits of the tool runner:
- No manual loop — the SDK handles calling tools and feeding results back
- Type-safe tool inputs via decorators
- Tool schemas are generated automatically from function signatures
- Iteration stops automatically when Claude has no more tool calls
Manual Agentic Loop
Use this when you need fine-grained control over the loop (e.g., custom logging, conditional tool execution, human-in-the-loop approval):
import anthropic
client = anthropic.Anthropic()
tools = [...] # Your tool definitions
messages = [{"role": "user", "content": user_input}]
# Agentic loop: keep going until Claude stops calling tools
while True:
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
tools=tools,
messages=messages
)
# If Claude is done (no more tool calls), break
if response.stop_reason == "end_turn":
break
# Server-side tool hit iteration limit; re-send to continue
if response.stop_reason == "pause_turn":
messages = [
{"role": "user", "content": user_input},
{"role": "assistant", "content": response.content},
]
continue
# Extract tool use blocks from the response
tool_use_blocks = [b for b in response.content if b.type == "tool_use"]
# Append assistant's response (including tool_use blocks)
messages.append({"role": "assistant", "content": response.content})
# Execute each tool and collect results
tool_results = []
for tool in tool_use_blocks:
result = execute_tool(tool.name, tool.input) # Your implementation
tool_results.append({
"type": "tool_result",
"tool_use_id": tool.id, # Must match the tool_use block's id
"content": result
})
# Append tool results as a user message
messages.append({"role": "user", "content": tool_results})
# Final response text
final_text = next(b.text for b in response.content if b.type == "text")
Handling Tool Results
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[{"role": "user", "content": "What's the weather in Paris?"}]
)
for block in response.content:
if block.type == "tool_use":
tool_name = block.name
tool_input = block.input
tool_use_id = block.id
result = execute_tool(tool_name, tool_input)
followup = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[
{"role": "user", "content": "What's the weather in Paris?"},
{"role": "assistant", "content": response.content},
{
"role": "user",
"content": [{
"type": "tool_result",
"tool_use_id": tool_use_id,
"content": result
}]
}
]
)
Multiple Tool Calls
tool_results = []
for block in response.content:
if block.type == "tool_use":
result = execute_tool(block.name, block.input)
tool_results.append({
"type": "tool_result",
"tool_use_id": block.id,
"content": result
})
# Send all results back at once
if tool_results:
followup = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
messages=[
*previous_messages,
{"role": "assistant", "content": response.content},
{"role": "user", "content": tool_results}
]
)
Error Handling in Tool Results
tool_result = {
"type": "tool_result",
"tool_use_id": tool_use_id,
"content": "Error: Location 'xyz' not found. Please provide a valid city name.",
"is_error": True
}
Tool Choice
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
tools=tools,
tool_choice={"type": "tool", "name": "get_weather"}, # Force specific tool
messages=[{"role": "user", "content": "What's the weather in Paris?"}]
)
Code Execution
Basic Usage
import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{
"role": "user",
"content": "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
}],
tools=[{
"type": "code_execution_20260120",
"name": "code_execution"
}]
)
for block in response.content:
if block.type == "text":
print(block.text)
elif block.type == "bash_code_execution_tool_result":
print(f"stdout: {block.content.stdout}")
Upload Files for Analysis
# 1. Upload a file
uploaded = client.beta.files.upload(file=open("sales_data.csv", "rb"))
# 2. Pass to code execution via container_upload block
# Code execution is GA; Files API is still beta (pass via extra_headers)
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
extra_headers={"anthropic-beta": "files-api-2025-04-14"},
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "Analyze this sales data. Show trends and create a visualization."},
{"type": "container_upload", "file_id": uploaded.id}
]
}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
Retrieve Generated Files
import os
OUTPUT_DIR = "./claude_outputs"
os.makedirs(OUTPUT_DIR, exist_ok=True)
for block in response.content:
if block.type == "bash_code_execution_tool_result":
result = block.content
if result.type == "bash_code_execution_result" and result.content:
for file_ref in result.content:
if file_ref.type == "bash_code_execution_output":
metadata = client.beta.files.retrieve_metadata(file_ref.file_id)
file_content = client.beta.files.download(file_ref.file_id)
# Use basename to prevent path traversal; validate result
safe_name = os.path.basename(metadata.filename)
if not safe_name or safe_name in (".", ".."):
print(f"Skipping invalid filename: {metadata.filename}")
continue
output_path = os.path.join(OUTPUT_DIR, safe_name)
file_content.write_to_file(output_path)
print(f"Saved: {output_path}")
Container Reuse
# First request: set up environment
response1 = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{"role": "user", "content": "Install tabulate and create data.json with sample data"}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
# Get container ID from response
container_id = response1.container.id
# Second request: reuse the same container
response2 = client.messages.create(
container=container_id,
model="{\{OPUS_ID}\}",
max_tokens=4096,
messages=[{"role": "user", "content": "Read data.json and display as a formatted table"}],
tools=[{"type": "code_execution_20260120", "name": "code_execution"}]
)
Response Structure
for block in response.content:
if block.type == "text":
print(block.text) # Claude's explanation
elif block.type == "server_tool_use":
print(f"Running: {block.name} - {block.input}") # What Claude is doing
elif block.type == "bash_code_execution_tool_result":
result = block.content
if result.type == "bash_code_execution_result":
if result.return_code == 0:
print(f"Output: {result.stdout}")
else:
print(f"Error: {result.stderr}")
else:
print(f"Tool error: {result.error_code}")
elif block.type == "text_editor_code_execution_tool_result":
print(f"File operation: {block.content}")
Memory Tool
Basic Usage
import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=2048,
messages=[{"role": "user", "content": "Remember that my preferred language is Python."}],
tools=[{"type": "memory_20250818", "name": "memory"}],
)
SDK Memory Helper
Subclass BetaAbstractMemoryTool:
from anthropic.lib.tools import BetaAbstractMemoryTool
class MyMemoryTool(BetaAbstractMemoryTool):
def view(self, command): ...
def create(self, command): ...
def str_replace(self, command): ...
def insert(self, command): ...
def delete(self, command): ...
def rename(self, command): ...
memory = MyMemoryTool()
# Use with tool runner
runner = client.beta.messages.tool_runner(
model="{\{OPUS_ID}\}",
max_tokens=2048,
tools=[memory],
messages=[{"role": "user", "content": "Remember my preferences"}],
)
for message in runner:
print(message)
For full implementation examples, use WebFetch:
https://github.com/anthropics/anthropic-sdk-python/blob/main/examples/memory/basic.py
Structured Outputs
JSON Outputs (Pydantic — Recommended)
from pydantic import BaseModel
from typing import List
import anthropic
class ContactInfo(BaseModel):
name: str
email: str
plan: str
interests: List[str]
demo_requested: bool
client = anthropic.Anthropic()
response = client.messages.parse(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{
"role": "user",
"content": "Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo."
}],
output_format=ContactInfo,
)
# response.parsed_output is a validated ContactInfo instance
contact = response.parsed_output
print(contact.name) # "Jane Doe"
print(contact.interests) # ["API", "SDKs"]
Raw Schema
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{
"role": "user",
"content": "Extract info: John Smith (john@example.com) wants the Enterprise plan."
}],
output_config={
"format": {
"type": "json_schema",
"schema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"email": {"type": "string"},
"plan": {"type": "string"},
"demo_requested": {"type": "boolean"}
},
"required": ["name", "email", "plan", "demo_requested"],
"additionalProperties": False
}
}
}
)
import json
data = json.loads(response.content[0].text)
Strict Tool Use
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{"role": "user", "content": "Book a flight to Tokyo for 2 passengers on March 15"}],
tools=[{
"name": "book_flight",
"description": "Book a flight to a destination",
"strict": True,
"input_schema": {
"type": "object",
"properties": {
"destination": {"type": "string"},
"date": {"type": "string", "format": "date"},
"passengers": {"type": "integer", "enum": [1, 2, 3, 4, 5, 6, 7, 8]}
},
"required": ["destination", "date", "passengers"],
"additionalProperties": False
}
}]
)
Using Both Together
response = client.messages.create(
model="{\{OPUS_ID}\}",
max_tokens=1024,
messages=[{"role": "user", "content": "Plan a trip to Paris next month"}],
output_config={
"format": {
"type": "json_schema",
"schema": {
"type": "object",
"properties": {
"summary": {"type": "string"},
"next_steps": {"type": "array", "items": {"type": "string"}\}
},
"required": ["summary", "next_steps"],
"additionalProperties": False
}
}
},
tools=[{
"name": "search_flights",
"description": "Search for available flights",
"strict": True,
"input_schema": {
"type": "object",
"properties": {
"destination": {"type": "string"},
"date": {"type": "string", "format": "date"}
},
"required": ["destination", "date"],
"additionalProperties": False
}
}]
)