Skip to content

Commit 690b6a7

Browse files
feat(api): Updating Assistants and Evals API schemas
1 parent b35ea63 commit 690b6a7

File tree

105 files changed

+12576
-2463
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

105 files changed

+12576
-2463
lines changed

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 99
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml
3-
openapi_spec_hash: 602e14add4bee018c6774e320ce309b8
4-
config_hash: bdacc55eb995c15255ec82130eb8c3bb
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml
3+
openapi_spec_hash: da3e669f65130043b1170048c0727890
4+
config_hash: d8d5fda350f6db77c784f35429741a2e

lib/openai.rb

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,6 @@
182182
require_relative "openai/models/beta/threads/text_delta_block"
183183
require_relative "openai/models/beta/thread_stream_event"
184184
require_relative "openai/models/beta/thread_update_params"
185-
require_relative "openai/models/beta/truncation_object"
186185
require_relative "openai/models/chat/chat_completion"
187186
require_relative "openai/models/chat/chat_completion_assistant_message_param"
188187
require_relative "openai/models/chat/chat_completion_audio"
@@ -235,18 +234,13 @@
235234
require_relative "openai/models/eval_custom_data_source_config"
236235
require_relative "openai/models/eval_delete_params"
237236
require_relative "openai/models/eval_delete_response"
238-
require_relative "openai/models/eval_item"
239237
require_relative "openai/models/eval_list_params"
240238
require_relative "openai/models/eval_list_response"
241-
require_relative "openai/models/eval_logs_data_source_config"
242239
require_relative "openai/models/eval_retrieve_params"
243240
require_relative "openai/models/eval_retrieve_response"
244241
require_relative "openai/models/evals/create_eval_completions_run_data_source"
245242
require_relative "openai/models/evals/create_eval_jsonl_run_data_source"
246-
require_relative "openai/models/evals/create_eval_responses_run_data_source"
247243
require_relative "openai/models/evals/eval_api_error"
248-
require_relative "openai/models/evals/eval_jsonl_file_content_source"
249-
require_relative "openai/models/evals/eval_jsonl_file_id_source"
250244
require_relative "openai/models/evals/run_cancel_params"
251245
require_relative "openai/models/evals/run_cancel_response"
252246
require_relative "openai/models/evals/run_create_params"
@@ -431,7 +425,6 @@
431425
require_relative "openai/models/vector_store_create_params"
432426
require_relative "openai/models/vector_store_deleted"
433427
require_relative "openai/models/vector_store_delete_params"
434-
require_relative "openai/models/vector_store_expiration_after"
435428
require_relative "openai/models/vector_store_list_params"
436429
require_relative "openai/models/vector_store_retrieve_params"
437430
require_relative "openai/models/vector_stores/file_batch_cancel_params"

lib/openai/models.rb

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -86,12 +86,8 @@ module OpenAI
8686

8787
EvalDeleteParams = OpenAI::Models::EvalDeleteParams
8888

89-
EvalItem = OpenAI::Models::EvalItem
90-
9189
EvalListParams = OpenAI::Models::EvalListParams
9290

93-
EvalLogsDataSourceConfig = OpenAI::Models::EvalLogsDataSourceConfig
94-
9591
EvalRetrieveParams = OpenAI::Models::EvalRetrieveParams
9692

9793
Evals = OpenAI::Models::Evals
@@ -208,8 +204,6 @@ module OpenAI
208204

209205
VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams
210206

211-
VectorStoreExpirationAfter = OpenAI::Models::VectorStoreExpirationAfter
212-
213207
VectorStoreListParams = OpenAI::Models::VectorStoreListParams
214208

215209
VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams

lib/openai/models/beta/thread_create_and_run_params.rb

Lines changed: 50 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -159,8 +159,10 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
159159
# Controls for how a thread will be truncated prior to the run. Use this to
160160
# control the intial context window of the run.
161161
#
162-
# @return [OpenAI::Beta::TruncationObject, nil]
163-
optional :truncation_strategy, -> { OpenAI::Beta::TruncationObject }, nil?: true
162+
# @return [OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil]
163+
optional :truncation_strategy,
164+
-> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy },
165+
nil?: true
164166

165167
# @!method initialize(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
166168
# Some parameter documentations has been truncated, see
@@ -194,7 +196,7 @@ class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
194196
#
195197
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
196198
#
197-
# @param truncation_strategy [OpenAI::Beta::TruncationObject, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
199+
# @param truncation_strategy [OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
198200
#
199201
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
200202

@@ -706,6 +708,51 @@ class FileSearch < OpenAI::Internal::Type::BaseModel
706708
# @param vector_store_ids [Array<String>] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
707709
end
708710
end
711+
712+
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
713+
# @!attribute type
714+
# The truncation strategy to use for the thread. The default is `auto`. If set to
715+
# `last_messages`, the thread will be truncated to the n most recent messages in
716+
# the thread. When set to `auto`, messages in the middle of the thread will be
717+
# dropped to fit the context length of the model, `max_prompt_tokens`.
718+
#
719+
# @return [Symbol, OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type]
720+
required :type, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type }
721+
722+
# @!attribute last_messages
723+
# The number of most recent messages from the thread when constructing the context
724+
# for the run.
725+
#
726+
# @return [Integer, nil]
727+
optional :last_messages, Integer, nil?: true
728+
729+
# @!method initialize(type:, last_messages: nil)
730+
# Some parameter documentations has been truncated, see
731+
# {OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy} for more details.
732+
#
733+
# Controls for how a thread will be truncated prior to the run. Use this to
734+
# control the intial context window of the run.
735+
#
736+
# @param type [Symbol, OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
737+
#
738+
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
739+
740+
# The truncation strategy to use for the thread. The default is `auto`. If set to
741+
# `last_messages`, the thread will be truncated to the n most recent messages in
742+
# the thread. When set to `auto`, messages in the middle of the thread will be
743+
# dropped to fit the context length of the model, `max_prompt_tokens`.
744+
#
745+
# @see OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy#type
746+
module Type
747+
extend OpenAI::Internal::Type::Enum
748+
749+
AUTO = :auto
750+
LAST_MESSAGES = :last_messages
751+
752+
# @!method self.values
753+
# @return [Array<Symbol>]
754+
end
755+
end
709756
end
710757
end
711758
end

lib/openai/models/beta/threads/run.rb

Lines changed: 49 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -197,8 +197,8 @@ class Run < OpenAI::Internal::Type::BaseModel
197197
# Controls for how a thread will be truncated prior to the run. Use this to
198198
# control the intial context window of the run.
199199
#
200-
# @return [OpenAI::Beta::TruncationObject, nil]
201-
required :truncation_strategy, -> { OpenAI::Beta::TruncationObject }, nil?: true
200+
# @return [OpenAI::Beta::Threads::Run::TruncationStrategy, nil]
201+
required :truncation_strategy, -> { OpenAI::Beta::Threads::Run::TruncationStrategy }, nil?: true
202202

203203
# @!attribute usage
204204
# Usage statistics related to the run. This value will be `null` if the run is not
@@ -270,7 +270,7 @@ class Run < OpenAI::Internal::Type::BaseModel
270270
#
271271
# @param tools [Array<OpenAI::Beta::CodeInterpreterTool, OpenAI::Beta::FileSearchTool, OpenAI::Beta::FunctionTool>] The list of tools that the [assistant](https://platform.openai.com/docs/api-refe
272272
#
273-
# @param truncation_strategy [OpenAI::Beta::TruncationObject, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
273+
# @param truncation_strategy [OpenAI::Beta::Threads::Run::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
274274
#
275275
# @param usage [OpenAI::Beta::Threads::Run::Usage, nil] Usage statistics related to the run. This value will be `null` if the run is not
276276
#
@@ -392,6 +392,52 @@ class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel
392392
end
393393
end
394394

395+
# @see OpenAI::Beta::Threads::Run#truncation_strategy
396+
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
397+
# @!attribute type
398+
# The truncation strategy to use for the thread. The default is `auto`. If set to
399+
# `last_messages`, the thread will be truncated to the n most recent messages in
400+
# the thread. When set to `auto`, messages in the middle of the thread will be
401+
# dropped to fit the context length of the model, `max_prompt_tokens`.
402+
#
403+
# @return [Symbol, OpenAI::Beta::Threads::Run::TruncationStrategy::Type]
404+
required :type, enum: -> { OpenAI::Beta::Threads::Run::TruncationStrategy::Type }
405+
406+
# @!attribute last_messages
407+
# The number of most recent messages from the thread when constructing the context
408+
# for the run.
409+
#
410+
# @return [Integer, nil]
411+
optional :last_messages, Integer, nil?: true
412+
413+
# @!method initialize(type:, last_messages: nil)
414+
# Some parameter documentations has been truncated, see
415+
# {OpenAI::Beta::Threads::Run::TruncationStrategy} for more details.
416+
#
417+
# Controls for how a thread will be truncated prior to the run. Use this to
418+
# control the intial context window of the run.
419+
#
420+
# @param type [Symbol, OpenAI::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
421+
#
422+
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
423+
424+
# The truncation strategy to use for the thread. The default is `auto`. If set to
425+
# `last_messages`, the thread will be truncated to the n most recent messages in
426+
# the thread. When set to `auto`, messages in the middle of the thread will be
427+
# dropped to fit the context length of the model, `max_prompt_tokens`.
428+
#
429+
# @see OpenAI::Beta::Threads::Run::TruncationStrategy#type
430+
module Type
431+
extend OpenAI::Internal::Type::Enum
432+
433+
AUTO = :auto
434+
LAST_MESSAGES = :last_messages
435+
436+
# @!method self.values
437+
# @return [Array<Symbol>]
438+
end
439+
end
440+
395441
# @see OpenAI::Beta::Threads::Run#usage
396442
class Usage < OpenAI::Internal::Type::BaseModel
397443
# @!attribute completion_tokens

lib/openai/models/beta/threads/run_create_params.rb

Lines changed: 50 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -187,8 +187,10 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel
187187
# Controls for how a thread will be truncated prior to the run. Use this to
188188
# control the intial context window of the run.
189189
#
190-
# @return [OpenAI::Beta::TruncationObject, nil]
191-
optional :truncation_strategy, -> { OpenAI::Beta::TruncationObject }, nil?: true
190+
# @return [OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy, nil]
191+
optional :truncation_strategy,
192+
-> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy },
193+
nil?: true
192194

193195
# @!method initialize(assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
194196
# Some parameter documentations has been truncated, see
@@ -226,7 +228,7 @@ class RunCreateParams < OpenAI::Internal::Type::BaseModel
226228
#
227229
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
228230
#
229-
# @param truncation_strategy [OpenAI::Beta::TruncationObject, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
231+
# @param truncation_strategy [OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
230232
#
231233
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
232234

@@ -417,6 +419,51 @@ module Model
417419
T.type_alias { T.any(String, OpenAI::ChatModel::TaggedSymbol) }
418420
end
419421
end
422+
423+
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
424+
# @!attribute type
425+
# The truncation strategy to use for the thread. The default is `auto`. If set to
426+
# `last_messages`, the thread will be truncated to the n most recent messages in
427+
# the thread. When set to `auto`, messages in the middle of the thread will be
428+
# dropped to fit the context length of the model, `max_prompt_tokens`.
429+
#
430+
# @return [Symbol, OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type]
431+
required :type, enum: -> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type }
432+
433+
# @!attribute last_messages
434+
# The number of most recent messages from the thread when constructing the context
435+
# for the run.
436+
#
437+
# @return [Integer, nil]
438+
optional :last_messages, Integer, nil?: true
439+
440+
# @!method initialize(type:, last_messages: nil)
441+
# Some parameter documentations has been truncated, see
442+
# {OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy} for more details.
443+
#
444+
# Controls for how a thread will be truncated prior to the run. Use this to
445+
# control the intial context window of the run.
446+
#
447+
# @param type [Symbol, OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
448+
#
449+
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
450+
451+
# The truncation strategy to use for the thread. The default is `auto`. If set to
452+
# `last_messages`, the thread will be truncated to the n most recent messages in
453+
# the thread. When set to `auto`, messages in the middle of the thread will be
454+
# dropped to fit the context length of the model, `max_prompt_tokens`.
455+
#
456+
# @see OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy#type
457+
module Type
458+
extend OpenAI::Internal::Type::Enum
459+
460+
AUTO = :auto
461+
LAST_MESSAGES = :last_messages
462+
463+
# @!method self.values
464+
# @return [Array<Symbol>]
465+
end
466+
end
420467
end
421468
end
422469
end

lib/openai/models/beta/truncation_object.rb

Lines changed: 0 additions & 52 deletions
This file was deleted.

lib/openai/models/chat_model.rb

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ module ChatModel
3737
GPT_4O_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-search-preview-2025-03-11"
3838
GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-mini-search-preview-2025-03-11"
3939
CHATGPT_4O_LATEST = :"chatgpt-4o-latest"
40+
CODEX_MINI_LATEST = :"codex-mini-latest"
4041
GPT_4O_MINI = :"gpt-4o-mini"
4142
GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
4243
GPT_4_TURBO = :"gpt-4-turbo"

0 commit comments

Comments
 (0)