Skip to content

Commit 84712fa

Browse files
docs: improve examples
1 parent bc60c2e commit 84712fa

11 files changed

Lines changed: 84 additions & 84 deletions

File tree

tests/api_resources/audio/test_speech.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,8 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
2727
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
2828
speech = client.audio.speech.create(
2929
input="string",
30-
model="string",
31-
voice="string",
30+
model="tts-1",
31+
voice="alloy",
3232
)
3333
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
3434
assert speech.json() == {"foo": "bar"}
@@ -39,8 +39,8 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
3939
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
4040
speech = client.audio.speech.create(
4141
input="string",
42-
model="string",
43-
voice="string",
42+
model="tts-1",
43+
voice="alloy",
4444
instructions="instructions",
4545
response_format="mp3",
4646
speed=0.25,
@@ -56,8 +56,8 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No
5656

5757
response = client.audio.speech.with_raw_response.create(
5858
input="string",
59-
model="string",
60-
voice="string",
59+
model="tts-1",
60+
voice="alloy",
6161
)
6262

6363
assert response.is_closed is True
@@ -71,8 +71,8 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter)
7171
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
7272
with client.audio.speech.with_streaming_response.create(
7373
input="string",
74-
model="string",
75-
voice="string",
74+
model="tts-1",
75+
voice="alloy",
7676
) as response:
7777
assert not response.is_closed
7878
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -94,8 +94,8 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo
9494
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
9595
speech = await async_client.audio.speech.create(
9696
input="string",
97-
model="string",
98-
voice="string",
97+
model="tts-1",
98+
voice="alloy",
9999
)
100100
assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
101101
assert speech.json() == {"foo": "bar"}
@@ -106,8 +106,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
106106
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
107107
speech = await async_client.audio.speech.create(
108108
input="string",
109-
model="string",
110-
voice="string",
109+
model="tts-1",
110+
voice="alloy",
111111
instructions="instructions",
112112
response_format="mp3",
113113
speed=0.25,
@@ -123,8 +123,8 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock:
123123

124124
response = await async_client.audio.speech.with_raw_response.create(
125125
input="string",
126-
model="string",
127-
voice="string",
126+
model="tts-1",
127+
voice="alloy",
128128
)
129129

130130
assert response.is_closed is True
@@ -138,8 +138,8 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_
138138
respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
139139
async with async_client.audio.speech.with_streaming_response.create(
140140
input="string",
141-
model="string",
142-
voice="string",
141+
model="tts-1",
142+
voice="alloy",
143143
) as response:
144144
assert not response.is_closed
145145
assert response.http_request.headers.get("X-Stainless-Lang") == "python"

tests/api_resources/beta/test_assistants.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
149149
description="description",
150150
instructions="instructions",
151151
metadata={"foo": "string"},
152-
model="string",
152+
model="gpt-5",
153153
name="name",
154154
reasoning_effort="none",
155155
response_format="auto",
@@ -414,7 +414,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
414414
description="description",
415415
instructions="instructions",
416416
metadata={"foo": "string"},
417-
model="string",
417+
model="gpt-5",
418418
name="name",
419419
reasoning_effort="none",
420420
response_format="auto",

tests/api_resources/beta/test_threads.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
248248
max_completion_tokens=256,
249249
max_prompt_tokens=256,
250250
metadata={"foo": "string"},
251-
model="string",
251+
model="gpt-5.4",
252252
parallel_tool_calls=True,
253253
response_format="auto",
254254
stream=False,
@@ -343,7 +343,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
343343
max_completion_tokens=256,
344344
max_prompt_tokens=256,
345345
metadata={"foo": "string"},
346-
model="string",
346+
model="gpt-5.4",
347347
parallel_tool_calls=True,
348348
response_format="auto",
349349
temperature=1,
@@ -649,7 +649,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
649649
max_completion_tokens=256,
650650
max_prompt_tokens=256,
651651
metadata={"foo": "string"},
652-
model="string",
652+
model="gpt-5.4",
653653
parallel_tool_calls=True,
654654
response_format="auto",
655655
stream=False,
@@ -744,7 +744,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
744744
max_completion_tokens=256,
745745
max_prompt_tokens=256,
746746
metadata={"foo": "string"},
747-
model="string",
747+
model="gpt-5.4",
748748
parallel_tool_calls=True,
749749
response_format="auto",
750750
temperature=1,

tests/api_resources/beta/threads/test_runs.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
5757
max_completion_tokens=256,
5858
max_prompt_tokens=256,
5959
metadata={"foo": "string"},
60-
model="string",
60+
model="gpt-5.4",
6161
parallel_tool_calls=True,
6262
reasoning_effort="none",
6363
response_format="auto",
@@ -148,7 +148,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
148148
max_completion_tokens=256,
149149
max_prompt_tokens=256,
150150
metadata={"foo": "string"},
151-
model="string",
151+
model="gpt-5.4",
152152
parallel_tool_calls=True,
153153
reasoning_effort="none",
154154
response_format="auto",
@@ -607,7 +607,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
607607
max_completion_tokens=256,
608608
max_prompt_tokens=256,
609609
metadata={"foo": "string"},
610-
model="string",
610+
model="gpt-5.4",
611611
parallel_tool_calls=True,
612612
reasoning_effort="none",
613613
response_format="auto",
@@ -698,7 +698,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
698698
max_completion_tokens=256,
699699
max_prompt_tokens=256,
700700
metadata={"foo": "string"},
701-
model="string",
701+
model="gpt-5.4",
702702
parallel_tool_calls=True,
703703
reasoning_effort="none",
704704
response_format="auto",

tests/api_resources/chat/test_completions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
4848
model="gpt-5.4",
4949
audio={
5050
"format": "wav",
51-
"voice": "string",
51+
"voice": "alloy",
5252
},
5353
frequency_penalty=-2,
5454
function_call="none",
@@ -182,7 +182,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
182182
stream=True,
183183
audio={
184184
"format": "wav",
185-
"voice": "string",
185+
"voice": "alloy",
186186
},
187187
frequency_penalty=-2,
188188
function_call="none",
@@ -491,7 +491,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
491491
model="gpt-5.4",
492492
audio={
493493
"format": "wav",
494-
"voice": "string",
494+
"voice": "alloy",
495495
},
496496
frequency_penalty=-2,
497497
function_call="none",
@@ -625,7 +625,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
625625
stream=True,
626626
audio={
627627
"format": "wav",
628-
"voice": "string",
628+
"voice": "alloy",
629629
},
630630
frequency_penalty=-2,
631631
function_call="none",

tests/api_resources/realtime/test_calls.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
4848
"noise_reduction": {"type": "near_field"},
4949
"transcription": {
5050
"language": "language",
51-
"model": "string",
51+
"model": "whisper-1",
5252
"prompt": "prompt",
5353
},
5454
"turn_detection": {
@@ -67,13 +67,13 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
6767
"type": "audio/pcm",
6868
},
6969
"speed": 0.25,
70-
"voice": "string",
70+
"voice": "alloy",
7171
},
7272
},
7373
"include": ["item.input_audio_transcription.logprobs"],
7474
"instructions": "instructions",
75-
"max_output_tokens": 0,
76-
"model": "string",
75+
"max_output_tokens": "inf",
76+
"model": "gpt-realtime",
7777
"output_modalities": ["text"],
7878
"prompt": {
7979
"id": "id",
@@ -147,7 +147,7 @@ def test_method_accept_with_all_params(self, client: OpenAI) -> None:
147147
"noise_reduction": {"type": "near_field"},
148148
"transcription": {
149149
"language": "language",
150-
"model": "string",
150+
"model": "whisper-1",
151151
"prompt": "prompt",
152152
},
153153
"turn_detection": {
@@ -166,13 +166,13 @@ def test_method_accept_with_all_params(self, client: OpenAI) -> None:
166166
"type": "audio/pcm",
167167
},
168168
"speed": 0.25,
169-
"voice": "string",
169+
"voice": "alloy",
170170
},
171171
},
172172
include=["item.input_audio_transcription.logprobs"],
173173
instructions="instructions",
174-
max_output_tokens=0,
175-
model="string",
174+
max_output_tokens="inf",
175+
model="gpt-realtime",
176176
output_modalities=["text"],
177177
prompt={
178178
"id": "id",
@@ -386,7 +386,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
386386
"noise_reduction": {"type": "near_field"},
387387
"transcription": {
388388
"language": "language",
389-
"model": "string",
389+
"model": "whisper-1",
390390
"prompt": "prompt",
391391
},
392392
"turn_detection": {
@@ -405,13 +405,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
405405
"type": "audio/pcm",
406406
},
407407
"speed": 0.25,
408-
"voice": "string",
408+
"voice": "alloy",
409409
},
410410
},
411411
"include": ["item.input_audio_transcription.logprobs"],
412412
"instructions": "instructions",
413-
"max_output_tokens": 0,
414-
"model": "string",
413+
"max_output_tokens": "inf",
414+
"model": "gpt-realtime",
415415
"output_modalities": ["text"],
416416
"prompt": {
417417
"id": "id",
@@ -485,7 +485,7 @@ async def test_method_accept_with_all_params(self, async_client: AsyncOpenAI) ->
485485
"noise_reduction": {"type": "near_field"},
486486
"transcription": {
487487
"language": "language",
488-
"model": "string",
488+
"model": "whisper-1",
489489
"prompt": "prompt",
490490
},
491491
"turn_detection": {
@@ -504,13 +504,13 @@ async def test_method_accept_with_all_params(self, async_client: AsyncOpenAI) ->
504504
"type": "audio/pcm",
505505
},
506506
"speed": 0.25,
507-
"voice": "string",
507+
"voice": "alloy",
508508
},
509509
},
510510
include=["item.input_audio_transcription.logprobs"],
511511
instructions="instructions",
512-
max_output_tokens=0,
513-
model="string",
512+
max_output_tokens="inf",
513+
model="gpt-realtime",
514514
output_modalities=["text"],
515515
prompt={
516516
"id": "id",

tests/api_resources/realtime/test_client_secrets.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
4040
"noise_reduction": {"type": "near_field"},
4141
"transcription": {
4242
"language": "language",
43-
"model": "string",
43+
"model": "whisper-1",
4444
"prompt": "prompt",
4545
},
4646
"turn_detection": {
@@ -59,13 +59,13 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
5959
"type": "audio/pcm",
6060
},
6161
"speed": 0.25,
62-
"voice": "string",
62+
"voice": "alloy",
6363
},
6464
},
6565
"include": ["item.input_audio_transcription.logprobs"],
6666
"instructions": "instructions",
67-
"max_output_tokens": 0,
68-
"model": "string",
67+
"max_output_tokens": "inf",
68+
"model": "gpt-realtime",
6969
"output_modalities": ["text"],
7070
"prompt": {
7171
"id": "id",
@@ -136,7 +136,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
136136
"noise_reduction": {"type": "near_field"},
137137
"transcription": {
138138
"language": "language",
139-
"model": "string",
139+
"model": "whisper-1",
140140
"prompt": "prompt",
141141
},
142142
"turn_detection": {
@@ -155,13 +155,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
155155
"type": "audio/pcm",
156156
},
157157
"speed": 0.25,
158-
"voice": "string",
158+
"voice": "alloy",
159159
},
160160
},
161161
"include": ["item.input_audio_transcription.logprobs"],
162162
"instructions": "instructions",
163-
"max_output_tokens": 0,
164-
"model": "string",
163+
"max_output_tokens": "inf",
164+
"model": "gpt-realtime",
165165
"output_modalities": ["text"],
166166
"prompt": {
167167
"id": "id",

0 commit comments

Comments (0)