diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index e0b8841fba..407051a9fb 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.89.0"
+  ".": "1.90.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 7e42b77a27..f8abf5bab6 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9e41d2d5471d2c28bff0d616f4476f5b0e6c541ef4cb51bdaaef5fdf5e13c8b2.yml
-openapi_spec_hash: 86f765e18d00e32cf2ce9db7ab84d946
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f411a68f272b8be0ab0c266043da33228687b9b2d76896724e3cef797de9563d.yml
+openapi_spec_hash: 89bf866ea95ecfb3d76c8833237047d6
 config_hash: dc5515e257676a27cb1ace1784aa92b3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6557ddeab6..dc45fa7bb5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog

+## 1.90.0 (2025-06-20)
+
+Full Changelog: [v1.89.0...v1.90.0](https://github.com/openai/openai-python/compare/v1.89.0...v1.90.0)
+
+### Features
+
+* **api:** make model and inputs not required to create response ([11bd62e](https://github.com/openai/openai-python/commit/11bd62eb7e46eec748edaf2e0cecf253ffc1202c))
+
 ## 1.89.0 (2025-06-20)

 Full Changelog: [v1.88.0...v1.89.0](https://github.com/openai/openai-python/compare/v1.88.0...v1.89.0)
diff --git a/pyproject.toml b/pyproject.toml
index 90716f994f..f66dacbf6d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.89.0"
+version = "1.90.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 46a41a551e..7e515c74bd 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "openai"
-__version__ = "1.89.0"  # x-release-please-version
+__version__ = "1.90.0"  # x-release-please-version
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 3276501494..841d198a5b 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -10,7 +10,7 @@

 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform
+from ..._utils import is_given, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
@@ -76,13 +76,13 @@ def with_streaming_response(self) -> ResponsesWithStreamingResponse:
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -119,22 +119,6 @@ def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).

@@ -154,6 +138,16 @@ def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -171,6 +165,12 @@ def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -274,14 +274,14 @@ def create(
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -317,22 +317,6 @@ def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
               [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).

@@ -359,6 +343,16 @@ def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -376,6 +370,12 @@ def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -472,14 +472,14 @@ def create(
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -515,22 +515,6 @@ def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
               [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).

@@ -557,6 +541,16 @@ def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -574,6 +568,12 @@ def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -666,17 +666,16 @@ def create(
         """
         ...

-    @required_args(["input", "model"], ["input", "model", "stream"])
     def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -702,13 +701,13 @@ def create(
             "/responses",
             body=maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
                     "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
                     "prompt": prompt,
@@ -1295,13 +1294,13 @@ def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -1338,22 +1337,6 @@ async def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           background: Whether to run the model response in the background.
               [Learn more](https://platform.openai.com/docs/guides/background).

@@ -1373,6 +1356,16 @@ async def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -1390,6 +1383,12 @@ async def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -1493,14 +1492,14 @@ async def create(
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: Literal[True],
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -1536,22 +1535,6 @@ async def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
               [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).

@@ -1578,6 +1561,16 @@ async def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -1595,6 +1588,12 @@ async def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -1691,14 +1690,14 @@ async def create(
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         stream: bool,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -1734,22 +1733,6 @@ async def create(
         your own data as input for the model's response.

         Args:
-          input: Text, image, or file inputs to the model, used to generate a response.
-
-              Learn more:
-
-              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-              - [Image inputs](https://platform.openai.com/docs/guides/images)
-              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-
-          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-              wide range of models with different capabilities, performance characteristics,
-              and price points. Refer to the
-              [model guide](https://platform.openai.com/docs/models) to browse and compare
-              available models.
-
           stream: If set to true, the model response data will be streamed to the client as it is
               generated using
               [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).

@@ -1776,6 +1759,16 @@ async def create(
               - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                 in code interpreter tool call items.

+          input: Text, image, or file inputs to the model, used to generate a response.
+
+              Learn more:
+
+              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+              - [Image inputs](https://platform.openai.com/docs/guides/images)
+              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+              - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+
           instructions: A system (or developer) message inserted into the model's context.

               When using along with `previous_response_id`, the instructions from a previous
@@ -1793,6 +1786,12 @@ async def create(
               Keys are strings with a maximum length of 64 characters. Values are strings
               with a maximum length of 512 characters.

+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
           parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

           previous_response_id: The unique ID of the previous response to the model. Use this to create
@@ -1885,17 +1884,16 @@
         """
         ...

-    @required_args(["input", "model"], ["input", "model", "stream"])
     async def create(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
         prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
@@ -1921,13 +1919,13 @@ async def create(
             "/responses",
             body=await async_maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
                     "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
                     "prompt": prompt,
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py
index 976ae9741d..22acd6f653 100644
--- a/src/openai/types/responses/response_create_params.py
+++ b/src/openai/types/responses/response_create_params.py
@@ -26,27 +26,6 @@


 class ResponseCreateParamsBase(TypedDict, total=False):
-    input: Required[Union[str, ResponseInputParam]]
-    """Text, image, or file inputs to the model, used to generate a response.
-
-    Learn more:
-
-    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-    - [Image inputs](https://platform.openai.com/docs/guides/images)
-    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
-    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
-    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
-    """
-
-    model: Required[ResponsesModel]
-    """Model ID used to generate the response, like `gpt-4o` or `o3`.
-
-    OpenAI offers a wide range of models with different capabilities, performance
-    characteristics, and price points. Refer to the
-    [model guide](https://platform.openai.com/docs/models) to browse and compare
-    available models.
-    """
-
     background: Optional[bool]
     """Whether to run the model response in the background.

@@ -72,6 +51,18 @@ class ResponseCreateParamsBase(TypedDict, total=False):
                 in code interpreter tool call items.
     """

+    input: Union[str, ResponseInputParam]
+    """Text, image, or file inputs to the model, used to generate a response.
+
+    Learn more:
+
+    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+    - [Image inputs](https://platform.openai.com/docs/guides/images)
+    - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+    - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+    - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+    """
+
     instructions: Optional[str]
     """A system (or developer) message inserted into the model's context.

@@ -97,6 +88,15 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     a maximum length of 512 characters.
     """

+    model: ResponsesModel
+    """Model ID used to generate the response, like `gpt-4o` or `o3`.
+
+    OpenAI offers a wide range of models with different capabilities, performance
+    characteristics, and price points. Refer to the
+    [model guide](https://platform.openai.com/docs/models) to browse and compare
+    available models.
+    """
+
     parallel_tool_calls: Optional[bool]
     """Whether to allow the model to run tool calls in parallel."""
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 6aaf0ea17f..5b7559655a 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -21,22 +21,19 @@ class TestResponses:
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
-        response = client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = client.responses.create()
         assert_matches_type(Response, response, path=["response"])

     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         response = client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -72,10 +69,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
-        http_response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = client.responses.with_raw_response.create()

         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,10 +78,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
-        with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        with client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -99,8 +90,6 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         response_stream.response.close()
@@ -108,14 +97,14 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         response_stream = client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -151,8 +140,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )

@@ -163,8 +150,6 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
@@ -358,22 +343,19 @@ class TestAsyncResponses:
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
-        )
+        response = await async_client.responses.create()
         assert_matches_type(Response, response, path=["response"])

     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             background=True,
             include=["file_search_call.results"],
+            input="string",
             instructions="instructions",
             max_output_tokens=0,
             metadata={"foo": "string"},
+            model="gpt-4o",
             parallel_tool_calls=True,
             previous_response_id="previous_response_id",
             prompt={
@@ -409,10 +391,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        http_response = await async_client.responses.with_raw_response.create(
-            input="string",
-            model="gpt-4o",
-        )
+        http_response = await async_client.responses.with_raw_response.create()

         assert http_response.is_closed is True
         assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -421,10 +400,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
-        async with async_client.responses.with_streaming_response.create(
-            input="string",
-            model="gpt-4o",
-        ) as http_response:
+        async with async_client.responses.with_streaming_response.create() as http_response:
             assert not http_response.is_closed
             assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -436,8 +412,6 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
-            input="string",
-            model="gpt-4o",
             stream=True,
         )
         await response_stream.response.aclose()
@@ -445,14 +419,14 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         response_stream = await async_client.responses.create(
- input="string", - model="gpt-4o", stream=True, background=True, include=["file_search_call.results"], + input="string", instructions="instructions", max_output_tokens=0, metadata={"foo": "string"}, + model="gpt-4o", parallel_tool_calls=True, previous_response_id="previous_response_id", prompt={ @@ -488,8 +462,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.responses.with_raw_response.create( - input="string", - model="gpt-4o", stream=True, ) @@ -500,8 +472,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) - @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.responses.with_streaming_response.create( - input="string", - model="gpt-4o", stream=True, ) as response: assert not response.is_closed