Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 26 additions & 16 deletions pydantic_ai_slim/pydantic_ai/models/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -859,18 +859,28 @@ def _process_response(self, response: responses.Response) -> ModelResponse:
for item in response.output:
if isinstance(item, responses.ResponseReasoningItem):
signature = item.encrypted_content
for summary in item.summary:
# We use the same id for all summaries so that we can merge them on the round trip.
# We only need to store the signature once.
if item.summary:
for summary in item.summary:
# We use the same id for all summaries so that we can merge them on the round trip.
items.append(
ThinkingPart(
content=summary.text,
id=item.id,
signature=signature,
provider_name=self.system if signature else None,
)
)
# We only need to store the signature once.
signature = None
elif signature:
items.append(
ThinkingPart(
content=summary.text,
content='',
id=item.id,
signature=signature,
provider_name=self.system if signature else None,
provider_name=self.system,
)
)
signature = None
# NOTE: We don't currently handle the raw CoT from gpt-oss `reasoning_text`: https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot
# If you need this, please file an issue.
elif isinstance(item, responses.ResponseOutputMessage):
Expand Down Expand Up @@ -1122,20 +1132,20 @@ async def _map_messages( # noqa: C901
# We don't currently track built-in tool calls from OpenAI
pass
elif isinstance(item, ThinkingPart):
if reasoning_item is not None and item.id == reasoning_item['id']:
if reasoning_item is None or reasoning_item['id'] != item.id:
reasoning_item = responses.ResponseReasoningItemParam(
id=item.id or _utils.generate_tool_call_id(),
summary=[],
encrypted_content=item.signature if item.provider_name == self.system else None,
type='reasoning',
)
openai_messages.append(reasoning_item)

if item.content:
reasoning_item['summary'] = [
*reasoning_item['summary'],
Summary(text=item.content, type='summary_text'),
]
continue

reasoning_item = responses.ResponseReasoningItemParam(
id=item.id or _utils.generate_tool_call_id(),
summary=[Summary(text=item.content, type='summary_text')],
encrypted_content=item.signature if item.provider_name == self.system else None,
type='reasoning',
)
openai_messages.append(reasoning_item)
else:
assert_never(item)
else:
Expand Down
57 changes: 57 additions & 0 deletions tests/models/test_openai_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@

with try_import() as imports_successful:
from openai.types.responses.response_output_message import Content, ResponseOutputMessage, ResponseOutputText
from openai.types.responses.response_reasoning_item import ResponseReasoningItem
from openai.types.responses.response_usage import ResponseUsage

from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings
Expand Down Expand Up @@ -1404,3 +1405,59 @@ def update_plan(plan: str) -> str:
),
]
)


async def test_openai_responses_thinking_without_summary(allow_model_requests: None):
    """A reasoning item with an empty ``summary`` but an ``encrypted_content`` signature
    must still surface as a (content-empty) ``ThinkingPart`` carrying that signature,
    and must round-trip back into a single reasoning item param via ``_map_messages``.
    """
    # Build a canned Responses API payload: one reasoning item with NO summaries
    # (summary=[]) but an encrypted signature, followed by a normal text output.
    c = response_message(
        [
            ResponseReasoningItem(
                id='reasoning',
                summary=[],
                type='reasoning',
                encrypted_content='123',
            ),
            ResponseOutputMessage(
                id='text',
                content=cast(list[Content], [ResponseOutputText(text='4', type='output_text', annotations=[])]),
                role='assistant',
                status='completed',
                type='message',
            ),
        ],
    )
    mock_client = MockOpenAIResponses.create_mock(c)
    model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(openai_client=mock_client))

    agent = Agent(model=model)
    result = await agent.run('What is 2+2?')
    # The signature-only reasoning item becomes a ThinkingPart with empty content
    # but the signature and provider name preserved (so nothing is lost on replay).
    assert result.all_messages() == snapshot(
        [
            ModelRequest(
                parts=[
                    UserPromptPart(
                        content='What is 2+2?',
                        timestamp=IsDatetime(),
                    )
                ]
            ),
            ModelResponse(
                parts=[
                    ThinkingPart(content='', id='reasoning', signature='123', provider_name='openai'),
                    TextPart(content='4'),
                ],
                model_name='gpt-4o-123',
                timestamp=IsDatetime(),
                provider_name='openai',
                provider_response_id='123',
            ),
        ]
    )

    # Round trip: mapping the history back to OpenAI message params must emit a
    # reasoning item with an empty summary list and the encrypted_content restored.
    # NOTE(review): deliberately exercising the private _map_messages — hence the ignore.
    _, openai_messages = await model._map_messages(result.all_messages()) # type: ignore[reportPrivateUsage]
    assert openai_messages == snapshot(
        [
            {'role': 'user', 'content': 'What is 2+2?'},
            {'id': 'reasoning', 'summary': [], 'encrypted_content': '123', 'type': 'reasoning'},
            {'role': 'assistant', 'content': '4'},
        ]
    )