Skip to content

Commit b55ef90

Browse files
committed
fix: cleanup
1 parent e742c93 commit b55ef90

File tree

10 files changed

+1371
-147
lines changed

10 files changed

+1371
-147
lines changed

src/agents/_run_impl.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -359,8 +359,6 @@ async def execute_tools_and_side_effects(
359359
# Add all tool results to new_step_items first, including approval items.
360360
# This ensures ToolCallItem items from processed_response.new_items are preserved
361361
# in the conversation history when resuming after an interruption.
362-
from .items import ToolApprovalItem
363-
364362
# Add all function results (including approval items) to new_step_items
365363
for result in function_results:
366364
new_step_items.append(result.run_item)
@@ -991,8 +989,6 @@ async def run_single_tool(
991989
needs_approval_result = func_tool.needs_approval
992990
if callable(needs_approval_result):
993991
# Parse arguments for dynamic approval check
994-
import json
995-
996992
try:
997993
parsed_args = (
998994
json.loads(tool_call.arguments) if tool_call.arguments else {}
@@ -1011,8 +1007,6 @@ async def run_single_tool(
10111007

10121008
if approval_status is None:
10131009
# Not yet decided - need to interrupt for approval
1014-
from .items import ToolApprovalItem
1015-
10161010
approval_item = ToolApprovalItem(
10171011
agent=agent, raw_item=tool_call, tool_name=func_tool.name
10181012
)
@@ -2396,8 +2390,6 @@ def _is_apply_patch_name(name: str | None, tool: ApplyPatchTool | None) -> bool:
23962390
def _build_litellm_json_tool_call(output: ResponseFunctionToolCall) -> FunctionTool:
23972391
async def on_invoke_tool(_ctx: ToolContext[Any], value: Any) -> Any:
23982392
if isinstance(value, str):
2399-
import json
2400-
24012393
return json.loads(value)
24022394
return value
24032395

src/agents/handoffs/history.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ def _build_summary_message(transcript: list[TResponseInputItem]) -> TResponseInp
127127
]
128128
content = "\n".join(content_lines)
129129
summary_message: dict[str, Any] = {
130-
"role": "system",
130+
"role": "assistant",
131131
"content": content,
132132
}
133133
return cast(TResponseInputItem, summary_message)

src/agents/result.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
from typing_extensions import TypeVar
1111

12-
from ._run_impl import QueueCompleteSentinel
12+
from ._run_impl import NextStepInterruption, ProcessedResponse, QueueCompleteSentinel
1313
from .agent import Agent
1414
from .agent_output import AgentOutputSchemaBase
1515
from .exceptions import (
@@ -22,7 +22,9 @@
2222
from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
2323
from .logger import logger
2424
from .run_context import RunContextWrapper
25+
from .run_state import RunState
2526
from .stream_events import StreamEvent
27+
from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
2628
from .tracing import Trace
2729
from .util._pretty_print import (
2830
pretty_print_result,
@@ -201,9 +203,6 @@ def to_state(self) -> Any:
201203
result = await Runner.run(agent, state)
202204
```
203205
"""
204-
from ._run_impl import NextStepInterruption
205-
from .run_state import RunState
206-
207206
# Create a RunState from the current result
208207
state = RunState(
209208
context=self.context_wrapper,
@@ -508,9 +507,6 @@ def to_state(self) -> Any:
508507
pass
509508
```
510509
"""
511-
from ._run_impl import NextStepInterruption
512-
from .run_state import RunState
513-
514510
# Create a RunState from the current result
515511
state = RunState(
516512
context=self.context_wrapper,

src/agents/run.py

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import asyncio
44
import contextlib
5+
import dataclasses as _dc
56
import inspect
67
import os
78
import warnings
@@ -56,8 +57,10 @@
5657
ModelResponse,
5758
ReasoningItem,
5859
RunItem,
60+
ToolApprovalItem,
5961
ToolCallItem,
6062
ToolCallItemTypes,
63+
ToolCallOutputItem,
6164
TResponseInputItem,
6265
normalize_function_call_output_payload,
6366
)
@@ -76,7 +79,7 @@
7679
RunItemStreamEvent,
7780
StreamEvent,
7881
)
79-
from .tool import Tool
82+
from .tool import FunctionTool, Tool
8083
from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
8184
from .tracing import Span, SpanError, agent_span, get_current_trace, trace
8285
from .tracing.span_data import AgentSpanData
@@ -1923,8 +1926,6 @@ async def _run_single_turn_streamed(
19231926
event_queue=streamed_result._event_queue,
19241927
)
19251928

1926-
import dataclasses as _dc
1927-
19281929
# Filter out items that have already been sent to avoid duplicates
19291930
items_to_filter = single_step_result.new_step_items
19301931

@@ -2001,8 +2002,6 @@ async def _execute_approved_tools_static(
20012002
hooks: RunHooks[TContext],
20022003
) -> None:
20032004
"""Execute tools that have been approved after an interruption (classmethod version)."""
2004-
from .items import ToolApprovalItem, ToolCallOutputItem
2005-
20062005
tool_runs: list[ToolRunFunction] = []
20072006

20082007
# Find all tools from the agent
@@ -2116,8 +2115,6 @@ async def _execute_approved_tools_static(
21162115
continue
21172116

21182117
# Only function tools can be executed via ToolRunFunction
2119-
from .tool import FunctionTool
2120-
21212118
if not isinstance(tool, FunctionTool):
21222119
# Only function tools can create proper tool_call_output_item
21232120
error_tool_call = (
@@ -2641,7 +2638,6 @@ def _normalize_input_items(items: list[TResponseInputItem]) -> list[TResponseInp
26412638
Returns:
26422639
Normalized list of input items
26432640
"""
2644-
from .run_state import _normalize_field_names
26452641

26462642
def _coerce_to_dict(value: TResponseInputItem) -> dict[str, Any] | None:
26472643
if isinstance(value, dict):

src/agents/run_state.py

Lines changed: 54 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,54 @@
66
from dataclasses import dataclass, field
77
from typing import TYPE_CHECKING, Any, Generic, cast
88

9+
from openai.types.responses import (
10+
ResponseComputerToolCall,
11+
ResponseFunctionToolCall,
12+
ResponseOutputMessage,
13+
ResponseReasoningItem,
14+
)
15+
from openai.types.responses.response_input_param import (
16+
ComputerCallOutput,
17+
FunctionCallOutput,
18+
LocalShellCallOutput,
19+
McpApprovalResponse,
20+
)
21+
from openai.types.responses.response_output_item import (
22+
McpApprovalRequest,
23+
McpListTools,
24+
)
25+
from pydantic import TypeAdapter, ValidationError
926
from typing_extensions import TypeVar
1027

11-
from ._run_impl import NextStepInterruption
28+
from ._run_impl import (
29+
NextStepInterruption,
30+
ProcessedResponse,
31+
ToolRunComputerAction,
32+
ToolRunFunction,
33+
ToolRunHandoff,
34+
ToolRunMCPApprovalRequest,
35+
)
1236
from .exceptions import UserError
13-
from .items import ToolApprovalItem, normalize_function_call_output_payload
37+
from .handoffs import Handoff
38+
from .items import (
39+
HandoffCallItem,
40+
HandoffOutputItem,
41+
MCPApprovalRequestItem,
42+
MCPApprovalResponseItem,
43+
MCPListToolsItem,
44+
MessageOutputItem,
45+
ModelResponse,
46+
ReasoningItem,
47+
RunItem,
48+
ToolApprovalItem,
49+
ToolCallItem,
50+
ToolCallOutputItem,
51+
TResponseInputItem,
52+
normalize_function_call_output_payload,
53+
)
1454
from .logger import logger
1555
from .run_context import RunContextWrapper
56+
from .tool import ComputerTool, FunctionTool, HostedMCPTool
1657
from .usage import Usage
1758

1859
if TYPE_CHECKING:
@@ -285,6 +326,17 @@ def to_json(self) -> dict[str, Any]:
285326
# Look it up from the corresponding function_call if missing
286327
if "name" not in normalized_item and call_id:
287328
normalized_item["name"] = call_id_to_name.get(call_id, "")
329+
# Convert assistant messages with string content to array format
330+
# TypeScript SDK requires content to be an array for assistant messages
331+
role = normalized_item.get("role")
332+
if role == "assistant":
333+
content = normalized_item.get("content")
334+
if isinstance(content, str):
335+
# Convert string content to array format with output_text
336+
normalized_item["content"] = [{"type": "output_text", "text": content}]
337+
# Ensure status field is present (required by TypeScript schema)
338+
if "status" not in normalized_item:
339+
normalized_item["status"] = "completed"
288340
# Normalize field names to camelCase for JSON (call_id -> callId)
289341
normalized_item = self._camelize_field_names(normalized_item)
290342
normalized_items.append(normalized_item)
@@ -745,8 +797,6 @@ async def from_string(
745797
# Reconstruct current step if it's an interruption
746798
current_step_data = state_json.get("currentStep")
747799
if current_step_data and current_step_data.get("type") == "next_step_interruption":
748-
from openai.types.responses import ResponseFunctionToolCall
749-
750800
interruptions: list[RunItem] = []
751801
# Handle both old format (interruptions directly) and new format (wrapped in data)
752802
interruptions_data = current_step_data.get("data", {}).get(
@@ -880,8 +930,6 @@ async def from_json(
880930
# Reconstruct current step if it's an interruption
881931
current_step_data = state_json.get("currentStep")
882932
if current_step_data and current_step_data.get("type") == "next_step_interruption":
883-
from openai.types.responses import ResponseFunctionToolCall
884-
885933
interruptions: list[RunItem] = []
886934
# Handle both old format (interruptions directly) and new format (wrapped in data)
887935
interruptions_data = current_step_data.get("data", {}).get(
@@ -920,15 +968,6 @@ async def _deserialize_processed_response(
920968
Returns:
921969
A reconstructed ProcessedResponse instance.
922970
"""
923-
from ._run_impl import (
924-
ProcessedResponse,
925-
ToolRunComputerAction,
926-
ToolRunFunction,
927-
ToolRunHandoff,
928-
ToolRunMCPApprovalRequest,
929-
)
930-
from .tool import FunctionTool
931-
932971
# Deserialize new items
933972
new_items = _deserialize_items(processed_response_data.get("newItems", []), agent_map)
934973

@@ -944,13 +983,9 @@ async def _deserialize_processed_response(
944983
tool.name: tool for tool in all_tools if hasattr(tool, "type") and tool.type == "computer"
945984
}
946985
# Build MCP tools map
947-
from .tool import HostedMCPTool
948-
949986
mcp_tools_map = {tool.name: tool for tool in all_tools if isinstance(tool, HostedMCPTool)}
950987

951988
# Get handoffs from the agent
952-
from .handoffs import Handoff
953-
954989
handoffs_map: dict[str, Handoff[Any, Agent[Any]]] = {}
955990
if hasattr(current_agent, "handoffs"):
956991
for handoff in current_agent.handoffs:
@@ -969,8 +1004,6 @@ async def _deserialize_processed_response(
9691004
"handoff", {}
9701005
).get("tool_name")
9711006
if handoff_name and handoff_name in handoffs_map:
972-
from openai.types.responses import ResponseFunctionToolCall
973-
9741007
tool_call = ResponseFunctionToolCall(**tool_call_data)
9751008
handoff = handoffs_map[handoff_name]
9761009
handoffs.append(ToolRunHandoff(tool_call=tool_call, handoff=handoff))
@@ -981,22 +1014,16 @@ async def _deserialize_processed_response(
9811014
tool_call_data = _normalize_field_names(func_data.get("toolCall", {}))
9821015
tool_name = func_data.get("tool", {}).get("name")
9831016
if tool_name and tool_name in tools_map:
984-
from openai.types.responses import ResponseFunctionToolCall
985-
9861017
tool_call = ResponseFunctionToolCall(**tool_call_data)
9871018
function_tool = tools_map[tool_name]
9881019
functions.append(ToolRunFunction(tool_call=tool_call, function_tool=function_tool))
9891020

9901021
# Deserialize computer actions
991-
from .tool import ComputerTool
992-
9931022
computer_actions = []
9941023
for action_data in processed_response_data.get("computerActions", []):
9951024
tool_call_data = _normalize_field_names(action_data.get("toolCall", {}))
9961025
computer_name = action_data.get("computer", {}).get("name")
9971026
if computer_name and computer_name in computer_tools_map:
998-
from openai.types.responses import ResponseComputerToolCall
999-
10001027
computer_tool_call = ResponseComputerToolCall(**tool_call_data)
10011028
computer_tool = computer_tools_map[computer_name]
10021029
# Only include ComputerTool instances
@@ -1011,9 +1038,6 @@ async def _deserialize_processed_response(
10111038
request_item_data = request_data.get("requestItem", {})
10121039
raw_item_data = _normalize_field_names(request_item_data.get("rawItem", {}))
10131040
# Create a McpApprovalRequest from the raw item data
1014-
from openai.types.responses.response_output_item import McpApprovalRequest
1015-
from pydantic import TypeAdapter
1016-
10171041
request_item_adapter: TypeAdapter[McpApprovalRequest] = TypeAdapter(McpApprovalRequest)
10181042
request_item = request_item_adapter.validate_python(raw_item_data)
10191043

@@ -1135,8 +1159,6 @@ def _deserialize_model_responses(responses_data: list[dict[str, Any]]) -> list[M
11351159
List of ModelResponse instances.
11361160
"""
11371161

1138-
from .items import ModelResponse
1139-
11401162
result = []
11411163
for resp_data in responses_data:
11421164
usage = Usage()
@@ -1145,8 +1167,6 @@ def _deserialize_model_responses(responses_data: list[dict[str, Any]]) -> list[M
11451167
usage.output_tokens = resp_data["usage"]["outputTokens"]
11461168
usage.total_tokens = resp_data["usage"]["totalTokens"]
11471169

1148-
from pydantic import TypeAdapter
1149-
11501170
# Normalize output items from JSON format (camelCase) to Python format (snake_case)
11511171
normalized_output = [
11521172
_normalize_field_names(item) if isinstance(item, dict) else item
@@ -1182,28 +1202,6 @@ def _deserialize_items(
11821202
Returns:
11831203
List of RunItem instances.
11841204
"""
1185-
from openai.types.responses import (
1186-
ResponseFunctionToolCall,
1187-
ResponseOutputMessage,
1188-
ResponseReasoningItem,
1189-
)
1190-
from openai.types.responses.response_output_item import (
1191-
McpApprovalRequest,
1192-
McpListTools,
1193-
)
1194-
1195-
from .items import (
1196-
HandoffCallItem,
1197-
HandoffOutputItem,
1198-
MCPApprovalRequestItem,
1199-
MCPApprovalResponseItem,
1200-
MCPListToolsItem,
1201-
MessageOutputItem,
1202-
ReasoningItem,
1203-
ToolApprovalItem,
1204-
ToolCallItem,
1205-
ToolCallOutputItem,
1206-
)
12071205

12081206
result: list[RunItem] = []
12091207

@@ -1264,13 +1262,6 @@ def _deserialize_items(
12641262

12651263
elif item_type == "tool_call_output_item":
12661264
# For tool call outputs, validate and convert the raw dict
1267-
from openai.types.responses.response_input_param import (
1268-
ComputerCallOutput,
1269-
FunctionCallOutput,
1270-
LocalShellCallOutput,
1271-
)
1272-
from pydantic import TypeAdapter
1273-
12741265
# Try to determine the type based on the dict structure
12751266
normalized_raw_item = _convert_protocol_result_to_api(normalized_raw_item)
12761267
output_type = normalized_raw_item.get("type")
@@ -1320,10 +1311,6 @@ def _deserialize_items(
13201311
# For handoff output items, we need to validate the raw_item
13211312
# as a TResponseInputItem (which is a union type)
13221313
# If validation fails, use the raw dict as-is (for test compatibility)
1323-
from pydantic import TypeAdapter, ValidationError
1324-
1325-
from .items import TResponseInputItem
1326-
13271314
try:
13281315
input_item_adapter: TypeAdapter[TResponseInputItem] = TypeAdapter(
13291316
TResponseInputItem
@@ -1355,9 +1342,6 @@ def _deserialize_items(
13551342

13561343
elif item_type == "mcp_approval_response_item":
13571344
# Validate and convert the raw dict to McpApprovalResponse
1358-
from openai.types.responses.response_input_param import McpApprovalResponse
1359-
from pydantic import TypeAdapter
1360-
13611345
approval_response_adapter: TypeAdapter[McpApprovalResponse] = TypeAdapter(
13621346
McpApprovalResponse
13631347
)

0 commit comments

Comments (0)