From dc7530caf705d1bf9ac8ac58b3899ac77119c6d3 Mon Sep 17 00:00:00 2001
From: "claude[bot]" <209825114+claude[bot]@users.noreply.github.com>
Date: Fri, 12 Sep 2025 20:20:51 +0000
Subject: [PATCH 1/3] fix: make popped ModelRequest content available in ctx.prompt
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When invoking an agent with only message_history, the latest ModelRequest
gets popped off the history but wasn't made available in RunContext.prompt.
This fix extracts UserPromptPart content from the popped message and
assigns it to ctx.deps.prompt.

Also moves the build_run_context call to after message processing to
ensure the context includes the extracted prompt content.

Fixes #2876

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Douwe Maan
---
 pydantic_ai_slim/pydantic_ai/_agent_graph.py | 21 +++++++++++++-------
 tests/test_agent.py                          |  7 ++++++-
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index e121ec475a..ef7978d096 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -186,14 +186,7 @@ async def run(
         # Use the `capture_run_messages` list as the message history so that new messages are added to it
         ctx.state.message_history = messages
 
-        run_context = build_run_context(ctx)
-
         parts: list[_messages.ModelRequestPart] = []
-        if messages:
-            # Reevaluate any dynamic system prompt parts
-            await self._reevaluate_dynamic_prompts(messages, run_context)
-        else:
-            parts.extend(await self._sys_parts(run_context))
 
         if (tool_call_results := ctx.deps.tool_call_results) is not None:
             if messages and (last_message := messages[-1]) and isinstance(last_message, _messages.ModelRequest):
@@ -214,11 +207,25 @@ async def run(
                 # Drop last message from history and reuse its parts
                 messages.pop()
                 parts.extend(last_message.parts)
+                # Extract UserPromptPart content from the popped message and add to ctx.deps.prompt
+                user_prompt_parts = [part for part in last_message.parts if isinstance(part, _messages.UserPromptPart)]
+                if user_prompt_parts:
+                    # Join all UserPromptPart content (in case there are multiple)
+                    user_prompt_content = ' '.join(str(part.content) for part in user_prompt_parts)
+                    ctx.deps.prompt = user_prompt_content
             elif isinstance(last_message, _messages.ModelResponse):
                 call_tools_node = await self._handle_message_history_model_response(ctx, last_message)
                 if call_tools_node is not None:
                     return call_tools_node
 
+        run_context = build_run_context(ctx)
+
+        if messages:
+            # Reevaluate any dynamic system prompt parts
+            await self._reevaluate_dynamic_prompts(messages, run_context)
+        else:
+            parts.extend(await self._sys_parts(run_context))
+
         if self.user_prompt is not None:
             parts.append(_messages.UserPromptPart(self.user_prompt))
 
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 261cdfaee1..f57787ee2a 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -2008,7 +2008,12 @@ def test_run_with_history_ending_on_model_request_and_no_user_prompt():
     ]
 
     m = TestModel()
-    agent = Agent(m, instructions='New instructions')
+    agent = Agent(m)
+
+    @agent.instructions
+    async def instructions(ctx: RunContext) -> str:
+        assert ctx.prompt == 'Hello'
+        return 'New instructions'
 
     result = agent.run_sync(message_history=messages)
     assert result.all_messages() == snapshot(

From be7420302cf104d2fa80367286a9fa593c3d02fa Mon Sep 17 00:00:00 2001
From: "claude[bot]" <209825114+claude[bot]@users.noreply.github.com>
Date: Fri, 12 Sep 2025 21:31:49 +0000
Subject: [PATCH 2/3] fix: properly combine UserPromptPart contents without unnecessary string conversion

- Remove string joining logic in favor of direct content assignment
- For single UserPromptPart, assign content directly to ctx.deps.prompt
- For multiple parts, combine as Sequence[UserContent] maintaining type compatibility
- Expand test to include ImageUrl in content and verify proper handling

Co-authored-by: Douwe Maan
---
 pydantic_ai_slim/pydantic_ai/_agent_graph.py | 15 ++++++++++++---
 tests/test_agent.py                          |  9 ++++++---
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index ef7978d096..8d298d0c3d 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -210,9 +210,18 @@ async def run(
                 # Extract UserPromptPart content from the popped message and add to ctx.deps.prompt
                 user_prompt_parts = [part for part in last_message.parts if isinstance(part, _messages.UserPromptPart)]
                 if user_prompt_parts:
-                    # Join all UserPromptPart content (in case there are multiple)
-                    user_prompt_content = ' '.join(str(part.content) for part in user_prompt_parts)
-                    ctx.deps.prompt = user_prompt_content
+                    # For single part, use content directly; for multiple parts, combine them
+                    if len(user_prompt_parts) == 1:
+                        ctx.deps.prompt = user_prompt_parts[0].content
+                    else:
+                        # Combine multiple UserPromptPart contents
+                        combined_content: list[_messages.UserContent] = []
+                        for part in user_prompt_parts:
+                            if isinstance(part.content, str):
+                                combined_content.append(part.content)
+                            else:
+                                combined_content.extend(part.content)
+                        ctx.deps.prompt = combined_content
             elif isinstance(last_message, _messages.ModelResponse):
                 call_tools_node = await self._handle_message_history_model_response(ctx, last_message)
                 if call_tools_node is not None:
diff --git a/tests/test_agent.py b/tests/test_agent.py
index f57787ee2a..7db699c4e1 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -2004,7 +2004,10 @@ async def ret_a(x: str) -> str:
 
 def test_run_with_history_ending_on_model_request_and_no_user_prompt():
     messages: list[ModelMessage] = [
-        ModelRequest(parts=[UserPromptPart(content='Hello')], instructions='Original instructions'),
+        ModelRequest(
+            parts=[UserPromptPart(content=['Hello', ImageUrl('https://example.com/image.jpg')])],
+            instructions='Original instructions',
+        ),
     ]
 
     m = TestModel()
@@ -2012,7 +2015,7 @@ def test_run_with_history_ending_on_model_request_and_no_user_prompt():
 
     @agent.instructions
     async def instructions(ctx: RunContext) -> str:
-        assert ctx.prompt == 'Hello'
+        assert ctx.prompt == ['Hello', ImageUrl('https://example.com/image.jpg')]
         return 'New instructions'
 
     result = agent.run_sync(message_history=messages)
@@ -2021,7 +2024,7 @@ async def instructions(ctx: RunContext) -> str:
             ModelRequest(
                 parts=[
                     UserPromptPart(
-                        content='Hello',
+                        content=['Hello', ImageUrl('https://example.com/image.jpg')],
                         timestamp=IsDatetime(),
                     )
                 ],

From 37e7a58ab50cd0d0ea111e75c1f10cae3602a27c Mon Sep 17 00:00:00 2001
From: Douwe Maan
Date: Mon, 15 Sep 2025 19:02:32 +0000
Subject: [PATCH 3/3] improve

---
 pydantic_ai_slim/pydantic_ai/_agent_graph.py | 32 +++++++++++--------
 tests/test_agent.py                          | 33 +++++++++++++++-----
 2 files changed, 44 insertions(+), 21 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
index 8d298d0c3d..e81711e8e6 100644
--- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py
+++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py
@@ -167,7 +167,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
     system_prompt_functions: list[_system_prompt.SystemPromptRunner[DepsT]]
     system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[DepsT]]
 
-    async def run(
+    async def run(  # noqa: C901
        self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
     ) -> ModelRequestNode[DepsT, NodeRunEndT] | CallToolsNode[DepsT, NodeRunEndT]:
         try:
@@ -186,8 +186,6 @@ async def run(
         # Use the `capture_run_messages` list as the message history so that new messages are added to it
         ctx.state.message_history = messages
 
-        parts: list[_messages.ModelRequestPart] = []
-
         if (tool_call_results := ctx.deps.tool_call_results) is not None:
             if messages and (last_message := messages[-1]) and isinstance(last_message, _messages.ModelRequest):
                 # If tool call results were provided, that means the previous run ended on deferred tool calls.
@@ -202,19 +200,20 @@ async def run(
             if not messages:
                 raise exceptions.UserError('Tool call results were provided, but the message history is empty.')
 
+        next_message: _messages.ModelRequest | None = None
+
         if messages and (last_message := messages[-1]):
             if isinstance(last_message, _messages.ModelRequest) and self.user_prompt is None:
                 # Drop last message from history and reuse its parts
                 messages.pop()
-                parts.extend(last_message.parts)
-                # Extract UserPromptPart content from the popped message and add to ctx.deps.prompt
+                next_message = _messages.ModelRequest(parts=last_message.parts)
+
+                # Extract `UserPromptPart` content from the popped message and add to `ctx.deps.prompt`
                 user_prompt_parts = [part for part in last_message.parts if isinstance(part, _messages.UserPromptPart)]
                 if user_prompt_parts:
-                    # For single part, use content directly; for multiple parts, combine them
                     if len(user_prompt_parts) == 1:
                         ctx.deps.prompt = user_prompt_parts[0].content
                     else:
-                        # Combine multiple UserPromptPart contents
                         combined_content: list[_messages.UserContent] = []
                         for part in user_prompt_parts:
                             if isinstance(part.content, str):
@@ -227,19 +226,26 @@ async def run(
                 if call_tools_node is not None:
                     return call_tools_node
 
+        # Build the run context after `ctx.deps.prompt` has been updated
         run_context = build_run_context(ctx)
 
+        parts: list[_messages.ModelRequestPart] = []
         if messages:
-            # Reevaluate any dynamic system prompt parts
             await self._reevaluate_dynamic_prompts(messages, run_context)
+
+        if next_message:
+            await self._reevaluate_dynamic_prompts([next_message], run_context)
         else:
-            parts.extend(await self._sys_parts(run_context))
+            parts: list[_messages.ModelRequestPart] = []
+            if not messages:
+                parts.extend(await self._sys_parts(run_context))
+
+            if self.user_prompt is not None:
+                parts.append(_messages.UserPromptPart(self.user_prompt))
 
-        if self.user_prompt is not None:
-            parts.append(_messages.UserPromptPart(self.user_prompt))
+            next_message = _messages.ModelRequest(parts=parts)
 
-        instructions = await ctx.deps.get_instructions(run_context)
-        next_message = _messages.ModelRequest(parts, instructions=instructions)
+        next_message.instructions = await ctx.deps.get_instructions(run_context)
 
         return ModelRequestNode[DepsT, NodeRunEndT](request=next_message)
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 7db699c4e1..6ee8a0225f 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -2003,19 +2003,27 @@ async def ret_a(x: str) -> str:
 
 def test_run_with_history_ending_on_model_request_and_no_user_prompt():
+    m = TestModel()
+    agent = Agent(m)
+
+    @agent.system_prompt(dynamic=True)
+    async def system_prompt(ctx: RunContext) -> str:
+        return f'System prompt: user prompt length = {len(ctx.prompt or [])}'
+
     messages: list[ModelMessage] = [
         ModelRequest(
-            parts=[UserPromptPart(content=['Hello', ImageUrl('https://example.com/image.jpg')])],
+            parts=[
+                SystemPromptPart(content='System prompt', dynamic_ref=system_prompt.__qualname__),
+                UserPromptPart(content=['Hello', ImageUrl('https://example.com/image.jpg')]),
+                UserPromptPart(content='How goes it?'),
+            ],
             instructions='Original instructions',
         ),
     ]
 
-    m = TestModel()
-    agent = Agent(m)
-
     @agent.instructions
     async def instructions(ctx: RunContext) -> str:
-        assert ctx.prompt == ['Hello', ImageUrl('https://example.com/image.jpg')]
+        assert ctx.prompt == ['Hello', ImageUrl('https://example.com/image.jpg'), 'How goes it?']
         return 'New instructions'
 
     result = agent.run_sync(message_history=messages)
     assert result.all_messages() == snapshot(
@@ -2023,16 +2031,25 @@ async def instructions(ctx: RunContext) -> str:
         [
             ModelRequest(
                 parts=[
+                    SystemPromptPart(
+                        content='System prompt: user prompt length = 3',
+                        timestamp=IsDatetime(),
+                        dynamic_ref=IsStr(),
+                    ),
                     UserPromptPart(
-                        content=['Hello', ImageUrl('https://example.com/image.jpg')],
+                        content=['Hello', ImageUrl(url='https://example.com/image.jpg', identifier='39cfc4')],
                         timestamp=IsDatetime(),
-                    )
+                    ),
+                    UserPromptPart(
+                        content='How goes it?',
+                        timestamp=IsDatetime(),
+                    ),
                 ],
                 instructions='New instructions',
             ),
             ModelResponse(
                 parts=[TextPart(content='success (no tool calls)')],
-                usage=RequestUsage(input_tokens=51, output_tokens=4),
+                usage=RequestUsage(input_tokens=61, output_tokens=4),
                 model_name='test',
                 timestamp=IsDatetime(),
             ),
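
Note: the following is a minimal sketch (not part of the patch series) of the
user-visible behavior these three patches add, adapted from the test above. It
assumes a pydantic_ai build with the series applied; TestModel is the stub
model from pydantic_ai's test utilities, and the agent/history names here are
illustrative only.

    from pydantic_ai import Agent, RunContext
    from pydantic_ai.messages import ModelMessage, ModelRequest, UserPromptPart
    from pydantic_ai.models.test import TestModel

    agent = Agent(TestModel())

    @agent.instructions
    async def instructions(ctx: RunContext) -> str:
        # Before this series, ctx.prompt was None when a run was started from
        # message_history alone; with it applied, ctx.prompt carries the
        # content of the popped UserPromptPart ('Hello' in this sketch).
        return f'The user said: {ctx.prompt}'

    # End the history on a ModelRequest and pass no user_prompt argument:
    history: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart(content='Hello')])]
    result = agent.run_sync(message_history=history)

With a single UserPromptPart the content is assigned to ctx.deps.prompt
directly; with several, they are combined into one list of UserContent
(see PATCH 2/3).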