 #   Some parameter documentations has been truncated, see
 #   {OpenAI::Models::ImageEditParams} for more details.
 #
@@ -110,6 +126,10 @@ class ImageEditParams < OpenAI::Internal::Type::BaseModel
 #
 #   @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
 #
+#   @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
+#
+#   @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
+#
 #   @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
 #
 #   @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
 #   @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
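For reference, a minimal Ruby sketch of how the newly documented `output_compression` and `output_format` parameters might be passed to an image edit call. It assumes the gem's standard `OpenAI::Client` setup and `images.edit` helper; the file path, prompt, and model name are placeholders, not values from this diff.

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Illustrative call exercising the newly documented parameters.
edit = client.images.edit(
  image: Pathname("lounge.png"),      # placeholder input image
  prompt: "Add a flamingo to the pool",
  model: "gpt-image-1",
  n: 1,
  output_format: :webp,               # an OpenAI::Models::ImageEditParams::OutputFormat value
  output_compression: 75              # compression level, 0-100
)
```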
@@ -310,6 +318,32 @@ module Reason
     end
   end

+  # A system (or developer) message inserted into the model's context.
+  #
+  # When using along with `previous_response_id`, the instructions from a previous
+  # response will not be carried over to the next response. This makes it simple to
+  # swap out system (or developer) messages in new responses.
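A short sketch of the behavior described above: because instructions are not carried over through `previous_response_id`, a follow-up request can swap in a different system (or developer) message. It assumes an existing `OpenAI::Client` instance as `client`; the model name and inputs are placeholders.

```ruby
# First turn: pirate-style system message.
first = client.responses.create(
  model: "gpt-4.1",
  instructions: "Answer like a pirate.",
  input: "Say hello."
)

# Second turn: chaining via previous_response_id does NOT carry the old
# instructions forward, so a new system/developer message can be supplied.
followup = client.responses.create(
  model: "gpt-4.1",
  previous_response_id: first.id,
  instructions: "Answer formally.",
  input: "Say hello again."
)
```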
 #   Some parameter documentations has been truncated, see
 #   {OpenAI::Models::Responses::ResponseCreateParams} for more details.
 #
@@ -238,7 +244,7 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
 #
 #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
 #
-#   @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
+#   @param instructions [String, nil] A system (or developer) message inserted into the model's context.
 #
 #   @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
 #
@@ -248,6 +254,8 @@ class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
 #
 #   @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
 #
+#   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+#
 #   @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
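And a sketch of the newly documented `prompt` parameter, which references a stored prompt template and its variables rather than inlining the text. The prompt id, version, and variables below are hypothetical placeholders, and passing a plain hash where an `OpenAI::Models::Responses::ResponsePrompt` is expected is an assumption about the SDK's request coercion, not something shown in this diff.

```ruby
# Placeholder template reference; substitute a real stored prompt id.
response = client.responses.create(
  model: "gpt-4.1",
  prompt: {
    id: "pmpt_example_123",             # hypothetical stored prompt id
    version: "2",                       # optional: pin a template version
    variables: { "city" => "Lisbon" }   # values interpolated into the template
  }
)
```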