Skip to content

Commit 26db76d

Browse files
chore: use fully qualified names for yard annotations and rbs aliases
1 parent 05b69d1 commit 26db76d

File tree

689 files changed

+7878
-3352
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

689 files changed

+7878
-3352
lines changed

lib/openai/models/all_models.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ module ResponsesOnlyModel
2424
end
2525

2626
# @!method self.variants
27-
# @return [Array(String, Symbol, OpenAI::ChatModel, Symbol, OpenAI::AllModels::ResponsesOnlyModel)]
27+
# @return [Array(String, Symbol, OpenAI::Models::ChatModel, Symbol, OpenAI::Models::AllModels::ResponsesOnlyModel)]
2828
end
2929
end
3030
end

lib/openai/models/audio/speech_create_params.rb

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
1818
# One of the available [TTS models](https://platform.openai.com/docs/models#tts):
1919
# `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
2020
#
21-
# @return [String, Symbol, OpenAI::Audio::SpeechModel]
21+
# @return [String, Symbol, OpenAI::Models::Audio::SpeechModel]
2222
required :model, union: -> { OpenAI::Audio::SpeechCreateParams::Model }
2323

2424
# @!attribute voice
@@ -27,7 +27,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
2727
# `verse`. Previews of the voices are available in the
2828
# [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
2929
#
30-
# @return [String, Symbol, OpenAI::Audio::SpeechCreateParams::Voice]
30+
# @return [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice]
3131
required :voice, union: -> { OpenAI::Audio::SpeechCreateParams::Voice }
3232

3333
# @!attribute instructions
@@ -41,7 +41,7 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
4141
# The format of the audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
4242
# `wav`, and `pcm`.
4343
#
44-
# @return [Symbol, OpenAI::Audio::SpeechCreateParams::ResponseFormat, nil]
44+
# @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat, nil]
4545
optional :response_format, enum: -> { OpenAI::Audio::SpeechCreateParams::ResponseFormat }
4646

4747
# @!attribute speed
@@ -57,13 +57,13 @@ class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
5757
#
5858
# @param input [String] The text to generate audio for. The maximum length is 4096 characters.
5959
#
60-
# @param model [String, Symbol, OpenAI::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts):
60+
# @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts):
6161
#
62-
# @param voice [String, Symbol, OpenAI::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
62+
# @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
6363
#
6464
# @param instructions [String] Control the voice of your generated audio with additional instructions. Does not
6565
#
66-
# @param response_format [Symbol, OpenAI::Audio::SpeechCreateParams::ResponseFormat] The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav
66+
# @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav
6767
#
6868
# @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
6969
#
@@ -80,7 +80,7 @@ module Model
8080
variant enum: -> { OpenAI::Audio::SpeechModel }
8181

8282
# @!method self.variants
83-
# @return [Array(String, Symbol, OpenAI::Audio::SpeechModel)]
83+
# @return [Array(String, Symbol, OpenAI::Models::Audio::SpeechModel)]
8484
end
8585

8686
# The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
@@ -92,27 +92,27 @@ module Voice
9292

9393
variant String
9494

95-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::ALLOY }
95+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ALLOY }
9696

97-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::ASH }
97+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ASH }
9898

99-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::BALLAD }
99+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::BALLAD }
100100

101-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::CORAL }
101+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::CORAL }
102102

103-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::ECHO }
103+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ECHO }
104104

105-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::FABLE }
105+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::FABLE }
106106

107-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::ONYX }
107+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ONYX }
108108

109-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::NOVA }
109+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::NOVA }
110110

111-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::SAGE }
111+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SAGE }
112112

113-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::SHIMMER }
113+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SHIMMER }
114114

115-
variant const: -> { OpenAI::Audio::SpeechCreateParams::Voice::VERSE }
115+
variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::VERSE }
116116

117117
# @!method self.variants
118118
# @return [Array(String, Symbol)]

lib/openai/models/audio/transcription.rb

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,19 +15,19 @@ class Transcription < OpenAI::Internal::Type::BaseModel
1515
# models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added
1616
# to the `include` array.
1717
#
18-
# @return [Array<OpenAI::Audio::Transcription::Logprob>, nil]
18+
# @return [Array<OpenAI::Models::Audio::Transcription::Logprob>, nil]
1919
optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::Transcription::Logprob] }
2020

2121
# @!method initialize(text:, logprobs: nil)
2222
# Some parameter documentation has been truncated, see
23-
# {OpenAI::Audio::Transcription} for more details.
23+
# {OpenAI::Models::Audio::Transcription} for more details.
2424
#
2525
# Represents a transcription response returned by model, based on the provided
2626
# input.
2727
#
2828
# @param text [String] The transcribed text.
2929
#
30-
# @param logprobs [Array<OpenAI::Audio::Transcription::Logprob>] The log probabilities of the tokens in the transcription. Only returned with the
30+
# @param logprobs [Array<OpenAI::Models::Audio::Transcription::Logprob>] The log probabilities of the tokens in the transcription. Only returned with the
3131

3232
class Logprob < OpenAI::Internal::Type::BaseModel
3333
# @!attribute token

lib/openai/models/audio/transcription_create_params.rb

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
2222
# `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
2323
# Whisper V2 model).
2424
#
25-
# @return [String, Symbol, OpenAI::AudioModel]
25+
# @return [String, Symbol, OpenAI::Models::AudioModel]
2626
required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }
2727

2828
# @!attribute chunking_strategy
@@ -31,7 +31,7 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
3131
# boundaries. `server_vad` object can be provided to tweak VAD detection
3232
# parameters manually. If unset, the audio is transcribed as a single block.
3333
#
34-
# @return [Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil]
34+
# @return [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil]
3535
optional :chunking_strategy,
3636
union: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy },
3737
nil?: true
@@ -43,7 +43,7 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
4343
# response_format set to `json` and only with the models `gpt-4o-transcribe` and
4444
# `gpt-4o-mini-transcribe`.
4545
#
46-
# @return [Array<Symbol, OpenAI::Audio::TranscriptionInclude>, nil]
46+
# @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>, nil]
4747
optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionInclude] }
4848

4949
# @!attribute language
@@ -68,7 +68,7 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
6868
# `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
6969
# the only supported format is `json`.
7070
#
71-
# @return [Symbol, OpenAI::AudioResponseFormat, nil]
71+
# @return [Symbol, OpenAI::Models::AudioResponseFormat, nil]
7272
optional :response_format, enum: -> { OpenAI::AudioResponseFormat }
7373

7474
# @!attribute temperature
@@ -88,7 +88,7 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
8888
# is no additional latency for segment timestamps, but generating word timestamps
8989
# incurs additional latency.
9090
#
91-
# @return [Array<Symbol, OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity>, nil]
91+
# @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>, nil]
9292
optional :timestamp_granularities,
9393
-> {
9494
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity]
@@ -100,21 +100,21 @@ class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
100100
#
101101
# @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
102102
#
103-
# @param model [String, Symbol, OpenAI::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
103+
# @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
104104
#
105-
# @param chunking_strategy [Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
105+
# @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
106106
#
107-
# @param include [Array<Symbol, OpenAI::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
107+
# @param include [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
108108
#
109109
# @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
110110
#
111111
# @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
112112
#
113-
# @param response_format [Symbol, OpenAI::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
113+
# @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
114114
#
115115
# @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
116116
#
117-
# @param timestamp_granularities [Array<Symbol, OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity>] The timestamp granularities to populate for this transcription. `response_format
117+
# @param timestamp_granularities [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>] The timestamp granularities to populate for this transcription. `response_format
118118
#
119119
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
120120

@@ -130,7 +130,7 @@ module Model
130130
variant enum: -> { OpenAI::AudioModel }
131131

132132
# @!method self.variants
133-
# @return [Array(String, Symbol, OpenAI::AudioModel)]
133+
# @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
134134
end
135135

136136
# Controls how the audio is cut into chunks. When set to `"auto"`, the server
@@ -149,7 +149,7 @@ class VadConfig < OpenAI::Internal::Type::BaseModel
149149
# @!attribute type
150150
# Must be set to `server_vad` to enable manual chunking using server side VAD.
151151
#
152-
# @return [Symbol, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type]
152+
# @return [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type]
153153
required :type,
154154
enum: -> {
155155
OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type
@@ -179,10 +179,10 @@ class VadConfig < OpenAI::Internal::Type::BaseModel
179179

180180
# @!method initialize(type:, prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil)
181181
# Some parameter documentation has been truncated, see
182-
# {OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig} for more
183-
# details.
182+
# {OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig}
183+
# for more details.
184184
#
185-
# @param type [Symbol, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD.
185+
# @param type [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD.
186186
#
187187
# @param prefix_padding_ms [Integer] Amount of audio to include before the VAD detected speech (in
188188
#
@@ -192,7 +192,7 @@ class VadConfig < OpenAI::Internal::Type::BaseModel
192192

193193
# Must be set to `server_vad` to enable manual chunking using server side VAD.
194194
#
195-
# @see OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type
195+
# @see OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type
196196
module Type
197197
extend OpenAI::Internal::Type::Enum
198198

@@ -204,7 +204,7 @@ module Type
204204
end
205205

206206
# @!method self.variants
207-
# @return [Array(Symbol, :auto, OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)]
207+
# @return [Array(Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)]
208208
end
209209

210210
module TimestampGranularity

lib/openai/models/audio/transcription_create_response.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ module TranscriptionCreateResponse
1919
variant -> { OpenAI::Audio::TranscriptionVerbose }
2020

2121
# @!method self.variants
22-
# @return [Array(OpenAI::Audio::Transcription, OpenAI::Audio::TranscriptionVerbose)]
22+
# @return [Array(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)]
2323
end
2424
end
2525
end

lib/openai/models/audio/transcription_segment.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ class TranscriptionSegment < OpenAI::Internal::Type::BaseModel
6969

7070
# @!method initialize(id:, avg_logprob:, compression_ratio:, end_:, no_speech_prob:, seek:, start:, temperature:, text:, tokens:)
7171
# Some parameter documentation has been truncated, see
72-
# {OpenAI::Audio::TranscriptionSegment} for more details.
72+
# {OpenAI::Models::Audio::TranscriptionSegment} for more details.
7373
#
7474
# @param id [Integer] Unique identifier of the segment.
7575
#

lib/openai/models/audio/transcription_stream_event.rb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ module TranscriptionStreamEvent
1919
variant :"transcript.text.done", -> { OpenAI::Audio::TranscriptionTextDoneEvent }
2020

2121
# @!method self.variants
22-
# @return [Array(OpenAI::Audio::TranscriptionTextDeltaEvent, OpenAI::Audio::TranscriptionTextDoneEvent)]
22+
# @return [Array(OpenAI::Models::Audio::TranscriptionTextDeltaEvent, OpenAI::Models::Audio::TranscriptionTextDoneEvent)]
2323
end
2424
end
2525
end

lib/openai/models/audio/transcription_text_delta_event.rb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@ class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel
2121
# [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
2222
# with the `include[]` parameter set to `logprobs`.
2323
#
24-
# @return [Array<OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob>, nil]
24+
# @return [Array<OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob>, nil]
2525
optional :logprobs,
2626
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob] }
2727

2828
# @!method initialize(delta:, logprobs: nil, type: :"transcript.text.delta")
2929
# Some parameter documentation has been truncated, see
30-
# {OpenAI::Audio::TranscriptionTextDeltaEvent} for more details.
30+
# {OpenAI::Models::Audio::TranscriptionTextDeltaEvent} for more details.
3131
#
3232
# Emitted when there is an additional text delta. This is also the first event
3333
# emitted when the transcription starts. Only emitted when you
@@ -36,7 +36,7 @@ class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel
3636
#
3737
# @param delta [String] The text delta that was additionally transcribed.
3838
#
39-
# @param logprobs [Array<OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob>] The log probabilities of the delta. Only included if you [create a transcription
39+
# @param logprobs [Array<OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob>] The log probabilities of the delta. Only included if you [create a transcription
4040
#
4141
# @param type [Symbol, :"transcript.text.delta"] The type of the event. Always `transcript.text.delta`.
4242

@@ -61,7 +61,7 @@ class Logprob < OpenAI::Internal::Type::BaseModel
6161

6262
# @!method initialize(token: nil, bytes: nil, logprob: nil)
6363
# Some parameter documentation has been truncated, see
64-
# {OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob} for more details.
64+
# {OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob} for more details.
6565
#
6666
# @param token [String] The token that was used to generate the log probability.
6767
#

0 commit comments

Comments
 (0)