@@ -943,22 +943,27 @@ def stream(
     def parse(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -991,21 +996,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
             "/responses",
             body=maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
+                    "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
+                    "max_tool_calls": max_tool_calls,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                     "reasoning": reasoning,
+                    "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
+                    "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
@@ -2202,22 +2212,27 @@ def stream(
     async def parse(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2250,21 +2265,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
             "/responses",
             body=maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
+                    "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
+                    "max_tool_calls": max_tool_calls,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                     "reasoning": reasoning,
+                    "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
+                    "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,