from ..settings import ModelSettings, merge_model_settings
from ..tools import (
    AgentDepsT,
-    DeferredToolCallResult,
-    DeferredToolResult,
    DeferredToolResults,
    DocstringFormat,
    GenerateToolJsonSchema,
    RunContext,
    Tool,
-    ToolApproved,
-    ToolDenied,
    ToolFuncContext,
    ToolFuncEither,
    ToolFuncPlain,
@@ -462,7 +458,7 @@ def iter(
    ) -> AbstractAsyncContextManager[AgentRun[AgentDepsT, RunOutputDataT]]: ...

    @asynccontextmanager
-    async def iter(  # noqa: C901
+    async def iter(
        self,
        user_prompt: str | Sequence[_messages.UserContent] | None = None,
        *,
@@ -505,7 +501,6 @@ async def main():
            [
                UserPromptNode(
                    user_prompt='What is the capital of France?',
-                    instructions=None,
                    instructions_functions=[],
                    system_prompts=(),
                    system_prompt_functions=[],
@@ -559,7 +554,6 @@ async def main():
        del model

        deps = self._get_deps(deps)
-        new_message_index = len(message_history) if message_history else 0
        output_schema = self._prepare_output_schema(output_type, model_used.profile)

        output_type_ = output_type or self.output_type
@@ -620,27 +614,12 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
            instrumentation_settings = None
            tracer = NoOpTracer()

-        tool_call_results: dict[str, DeferredToolResult] | None = None
-        if deferred_tool_results is not None:
-            tool_call_results = {}
-            for tool_call_id, approval in deferred_tool_results.approvals.items():
-                if approval is True:
-                    approval = ToolApproved()
-                elif approval is False:
-                    approval = ToolDenied()
-                tool_call_results[tool_call_id] = approval
-
-            if calls := deferred_tool_results.calls:
-                call_result_types = _utils.get_union_args(DeferredToolCallResult)
-                for tool_call_id, result in calls.items():
-                    if not isinstance(result, call_result_types):
-                        result = _messages.ToolReturn(result)
-                    tool_call_results[tool_call_id] = result
-
-        graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
+        graph_deps = _agent_graph.GraphAgentDeps[
+            AgentDepsT, RunOutputDataT
+        ](
            user_deps=deps,
            prompt=user_prompt,
-            new_message_index=new_message_index,
+            new_message_index=0,  # This will be set in `UserPromptNode` based on the length of the cleaned message history
            model=model_used,
            model_settings=model_settings,
            usage_limits=usage_limits,
@@ -651,13 +630,13 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
            history_processors=self.history_processors,
            builtin_tools=list(self._builtin_tools),
            tool_manager=tool_manager,
-            tool_call_results=tool_call_results,
            tracer=tracer,
            get_instructions=get_instructions,
            instrumentation_settings=instrumentation_settings,
        )
        start_node = _agent_graph.UserPromptNode[AgentDepsT](
            user_prompt=user_prompt,
+            deferred_tool_results=deferred_tool_results,
            instructions=self._instructions,
            instructions_functions=self._instructions_functions,
            system_prompts=self._system_prompts,
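
For context, the logic this diff removes from `Agent.iter` is sketched below as a standalone function: bare `True`/`False` approvals become `ToolApproved()`/`ToolDenied()`, and plain call results are wrapped in `ToolReturn`. The function name `normalize_deferred_results` is hypothetical, and the `isinstance` check is simplified to `ToolReturn` only, whereas the removed code checks the full `DeferredToolCallResult` union via `_utils.get_union_args`; per the diff, the equivalent normalization is assumed to now happen inside `UserPromptNode`, which receives `deferred_tool_results` directly.

```python
# Hypothetical sketch of the normalization removed from Agent.iter above;
# this diff suggests it now lives in UserPromptNode, not in Agent.iter.
from pydantic_ai.messages import ToolReturn
from pydantic_ai.tools import DeferredToolResults, ToolApproved, ToolDenied


def normalize_deferred_results(deferred: DeferredToolResults) -> dict[str, object]:
    """Coerce raw approval flags and call results into result objects keyed by tool call ID."""
    results: dict[str, object] = {}
    # Bare booleans become explicit approval/denial markers; richer objects pass through.
    for tool_call_id, approval in deferred.approvals.items():
        if approval is True:
            approval = ToolApproved()
        elif approval is False:
            approval = ToolDenied()
        results[tool_call_id] = approval
    # Plain return values get wrapped in ToolReturn (simplified: the removed code
    # accepted any member of the DeferredToolCallResult union unchanged).
    for tool_call_id, result in deferred.calls.items():
        if not isinstance(result, ToolReturn):
            result = ToolReturn(result)
        results[tool_call_id] = result
    return results
```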