Commit e9811ac

Merge pull request #154 from openai/release-please--branches--main--changes--next
release: 0.7.0
2 parents: 58e8bde + 5356bc1 · commit e9811ac

28 files changed · +1792 / -21 lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 {
-  ".": "0.6.0"
+  ".": "0.7.0"
 }

.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 109
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0205acb1015d29b2312a48526734c0399f93026d4fe2dff5c7768f566e333fd2.yml
-openapi_spec_hash: 1772cc9056c2f6dfb2a4e9cb77ee6343
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4865dda2b62927bd141cbc85f81be3d88602f103e2c581e15eb1caded3e3aaa2.yml
+openapi_spec_hash: 7d14a9b23ef4ac93ea46d629601b6f6b
 config_hash: ed1e6b3c5f93d12b80d31167f55c557c

CHANGELOG.md

Lines changed: 15 additions & 0 deletions
@@ -1,5 +1,20 @@
 # Changelog
 
+## 0.7.0 (2025-06-09)
+
+Full Changelog: [v0.6.0...v0.7.0](https://github.com/openai/openai-ruby/compare/v0.6.0...v0.7.0)
+
+### Features
+
+* **api:** Add tools and structured outputs to evals ([6ee3392](https://github.com/openai/openai-ruby/commit/6ee33924e9146e2450e9c43d052886ed3214cbde))
+
+
+### Bug Fixes
+
+* default content-type for text in multi-part formdata uploads should be text/plain ([105cf47](https://github.com/openai/openai-ruby/commit/105cf4717993c744ee6c453d2a99ae03f51035d4))
+* tool parameter mapping for chat completions ([#156](https://github.com/openai/openai-ruby/issues/156)) ([5999b9f](https://github.com/openai/openai-ruby/commit/5999b9f6ad6dc73a290a8ef7b1b52bd89897039c))
+* tool parameter mapping for responses ([#704](https://github.com/openai/openai-ruby/issues/704)) ([ac8bf11](https://github.com/openai/openai-ruby/commit/ac8bf11cf59fcc778f1658429a1fc06eaca79bba))
+
 ## 0.6.0 (2025-06-03)
 
 Full Changelog: [v0.5.1...v0.6.0](https://github.com/openai/openai-ruby/compare/v0.5.1...v0.6.0)

Gemfile.lock

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ GIT
 PATH
   remote: .
   specs:
-    openai (0.6.0)
+    openai (0.7.0)
       connection_pool
 
 GEM

README.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->
 
 ```ruby
-gem "openai", "~> 0.6.0"
+gem "openai", "~> 0.7.0"
 ```
 
 <!-- x-release-please-end -->

lib/openai/internal/util.rb

Lines changed: 8 additions & 9 deletions
@@ -497,7 +497,7 @@ class << self
       # @param closing [Array<Proc>]
       # @param content_type [String, nil]
       private def write_multipart_content(y, val:, closing:, content_type: nil)
-        content_type ||= "application/octet-stream"
+        content_line = "Content-Type: %s\r\n\r\n"
 
         case val
         in OpenAI::FilePart
@@ -508,24 +508,21 @@ class << self
             content_type: val.content_type
           )
         in Pathname
-          y << "Content-Type: #{content_type}\r\n\r\n"
+          y << format(content_line, content_type || "application/octet-stream")
           io = val.open(binmode: true)
           closing << io.method(:close)
           IO.copy_stream(io, y)
         in IO
-          y << "Content-Type: #{content_type}\r\n\r\n"
+          y << format(content_line, content_type || "application/octet-stream")
           IO.copy_stream(val, y)
         in StringIO
-          y << "Content-Type: #{content_type}\r\n\r\n"
+          y << format(content_line, content_type || "application/octet-stream")
           y << val.string
-        in String
-          y << "Content-Type: #{content_type}\r\n\r\n"
-          y << val.to_s
         in -> { primitive?(_1) }
-          y << "Content-Type: text/plain\r\n\r\n"
+          y << format(content_line, content_type || "text/plain")
           y << val.to_s
         else
-          y << "Content-Type: application/json\r\n\r\n"
+          y << format(content_line, content_type || "application/json")
           y << JSON.generate(val)
         end
         y << "\r\n"
@@ -563,6 +560,8 @@ class << self
 
       # @api private
       #
+      # https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.1.md#special-considerations-for-multipart-content
+      #
       # @param body [Object]
       #
       # @return [Array(String, Enumerable<String>)]
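
The net effect of this hunk: an explicit `content_type` passed by the caller now always wins, and when none is given the default depends on the value's shape rather than a blanket `application/octet-stream` — files and IO streams fall back to `application/octet-stream`, primitive values to `text/plain` (the 105cf47 fix), and anything else is serialized as JSON. Dropping the dedicated `in String` arm is what moves plain strings from the old octet-stream default into the `primitive?` branch. A minimal standalone sketch of the same dispatch, assuming Ruby 3 pattern matching; `pick_content_type` is an illustrative helper, not an SDK method:

```ruby
require "pathname"
require "stringio"

# Illustrative helper mirroring the fallback rules in the hunk above:
# a caller-supplied content type always wins; otherwise the default
# depends on the kind of value written into the multipart body.
def pick_content_type(val, content_type: nil)
  return content_type if content_type

  case val
  in Pathname | IO | StringIO then "application/octet-stream"
  in String | Symbol | Numeric | true | false | nil then "text/plain"
  else "application/json" # hashes, arrays, models, etc. are JSON-encoded
  end
end

pick_content_type("hello")                            # => "text/plain"
pick_content_type({purpose: "assistants"})            # => "application/json"
pick_content_type(StringIO.new("raw bytes"))          # => "application/octet-stream"
pick_content_type("a,b,c", content_type: "text/csv")  # => "text/csv"
```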

lib/openai/models/evals/create_eval_completions_run_data_source.rb

Lines changed: 67 additions & 1 deletion
@@ -432,6 +432,24 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
         # @return [Integer, nil]
         optional :max_completion_tokens, Integer
 
+        # @!attribute response_format
+        # An object specifying the format that the model must output.
+        #
+        # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        # Outputs which ensures the model will match your supplied JSON schema. Learn more
+        # in the
+        # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #
+        # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+        # ensures the message the model generates is valid JSON. Using `json_schema` is
+        # preferred for models that support it.
+        #
+        # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil]
+        optional :response_format,
+                 union: -> {
+                   OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat
+                 }
+
         # @!attribute seed
         # A seed value to initialize the randomness, during sampling.
         #
@@ -444,20 +462,68 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
         # @return [Float, nil]
         optional :temperature, Float
 
+        # @!attribute tools
+        # A list of tools the model may call. Currently, only functions are supported as a
+        # tool. Use this to provide a list of functions the model may generate JSON inputs
+        # for. A max of 128 functions are supported.
+        #
+        # @return [Array<OpenAI::Models::Chat::ChatCompletionTool>, nil]
+        optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTool] }
+
         # @!attribute top_p
         # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
         #
         # @return [Float, nil]
         optional :top_p, Float
 
-        # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+        # @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
+        # Some parameter documentations has been truncated, see
+        # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for
+        # more details.
+        #
         # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
         #
+        # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+        #
         # @param seed [Integer] A seed value to initialize the randomness, during sampling.
         #
         # @param temperature [Float] A higher temperature increases randomness in the outputs.
         #
+        # @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
+        #
         # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+        # An object specifying the format that the model must output.
+        #
+        # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        # Outputs which ensures the model will match your supplied JSON schema. Learn more
+        # in the
+        # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        #
+        # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+        # ensures the message the model generates is valid JSON. Using `json_schema` is
+        # preferred for models that support it.
+        #
+        # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams#response_format
+        module ResponseFormat
+          extend OpenAI::Internal::Type::Union
+
+          # Default response format. Used to generate text responses.
+          variant -> { OpenAI::ResponseFormatText }
+
+          # JSON Schema response format. Used to generate structured JSON responses.
+          # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+          variant -> { OpenAI::ResponseFormatJSONSchema }
+
+          # JSON object response format. An older method of generating JSON responses.
+          # Using `json_schema` is recommended for models that support it. Note that the
+          # model will not generate JSON without a system or user message instructing it
+          # to do so.
+          variant -> { OpenAI::ResponseFormatJSONObject }
+
+          # @!method self.variants
+          # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
+        end
       end
     end
   end
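
These two attributes are the chat-completions side of the 0.7.0 "tools and structured outputs to evals" feature. A hedged sketch of filling them in when building sampling params for an eval run; the schema and function are made up for illustration, and passing plain hashes for the typed `response_format`/`tools` fields is an assumption about the SDK's coercion behavior:

```ruby
# Hypothetical values; only the attribute names come from the model above.
sampling_params = OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams.new(
  max_completion_tokens: 512,
  temperature: 0.2,
  # Structured Outputs: constrain the graded completions to a JSON schema.
  response_format: {
    type: :json_schema,
    json_schema: {
      name: "grade", # illustrative schema name
      schema: {
        type: "object",
        properties: {score: {type: "integer"}},
        required: ["score"],
        additionalProperties: false
      }
    }
  },
  # Expose a function the model may call while producing its answer.
  tools: [
    {
      type: :function,
      function: {
        name: "lookup_order", # illustrative function
        description: "Look up an order by its ID",
        parameters: {
          type: "object",
          properties: {order_id: {type: "string"}},
          required: ["order_id"]
        }
      }
    }
  ]
)
```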

lib/openai/models/evals/run_cancel_response.rb

Lines changed: 77 additions & 1 deletion
@@ -616,20 +616,96 @@ class SamplingParams < OpenAI::Internal::Type::BaseModel
         # @return [Float, nil]
         optional :temperature, Float
 
+        # @!attribute text
+        # Configuration options for a text response from the model. Can be plain text or
+        # structured JSON data. Learn more:
+        #
+        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+        # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+        #
+        # @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, nil]
+        optional :text,
+                 -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text }
+
+        # @!attribute tools
+        # An array of tools the model may call while generating a response. You can
+        # specify which tool to use by setting the `tool_choice` parameter.
+        #
+        # The two categories of tools you can provide the model are:
+        #
+        # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+        #   capabilities, like
+        #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+        #   Learn more about
+        #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+        # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+        #   the model to call your own code. Learn more about
+        #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+        #
+        # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
+        optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
         # @!attribute top_p
         # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
         #
         # @return [Float, nil]
         optional :top_p, Float
 
-        # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+        # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+        # Some parameter documentations has been truncated, see
+        # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
+        # for more details.
+        #
         # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
         #
         # @param seed [Integer] A seed value to initialize the randomness, during sampling.
         #
         # @param temperature [Float] A higher temperature increases randomness in the outputs.
         #
+        # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+        #
+        # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+        #
         # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+        # @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams#text
+        class Text < OpenAI::Internal::Type::BaseModel
+          # @!attribute format_
+          # An object specifying the format that the model must output.
+          #
+          # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+          # ensures the model will match your supplied JSON schema. Learn more in the
+          # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+          #
+          # The default format is `{ "type": "text" }` with no additional options.
+          #
+          # **Not recommended for gpt-4o and newer models:**
+          #
+          # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+          # ensures the message the model generates is valid JSON. Using `json_schema` is
+          # preferred for models that support it.
+          #
+          # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+          optional :format_,
+                   union: -> {
+                     OpenAI::Responses::ResponseFormatTextConfig
+                   },
+                   api_name: :format
+
+          # @!method initialize(format_: nil)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text}
+          # for more details.
+          #
+          # Configuration options for a text response from the model. Can be plain text or
+          # structured JSON data. Learn more:
+          #
+          # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+          # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+          #
+          # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+        end
       end
     end
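
The run response models mirror the same additions, with the wire field `format` surfaced as `format_` in Ruby via the `api_name: :format` mapping above. A hedged sketch of inspecting the new fields, assuming `run` is an `OpenAI::Models::Evals::RunCancelResponse` whose data source is the responses variant:

```ruby
# `run` is assumed to come from an eval-run cancel call made elsewhere.
sampling = run.data_source.sampling_params

if sampling&.text&.format_
  # JSON key `format` maps to the Ruby accessor `format_` (api_name: :format).
  puts "configured output format: #{sampling.text.format_.class}"
end

Array(sampling&.tools).each do |tool|
  puts "tool attached to the run: #{tool.class}"
end
```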
