Skip to content

Commit 643da8d

Browse files
authored
deprecated model GPT3Ada changed to GPT3Babbage002 (#843)
* deprecated model GPT3Ada changed to GPT3Babbage002 * Delete test.mp3
1 parent c37cf9a commit 643da8d

File tree

4 files changed

+6
-6
lines changed

4 files changed

+6
-6
lines changed

README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -141,7 +141,7 @@ func main() {
141141
ctx := context.Background()
142142

143143
req := openai.CompletionRequest{
144-
Model: openai.GPT3Ada,
144+
Model: openai.GPT3Babbage002,
145145
MaxTokens: 5,
146146
Prompt: "Lorem ipsum",
147147
}
@@ -174,7 +174,7 @@ func main() {
174174
ctx := context.Background()
175175

176176
req := openai.CompletionRequest{
177-
Model: openai.GPT3Ada,
177+
Model: openai.GPT3Babbage002,
178178
MaxTokens: 5,
179179
Prompt: "Lorem ipsum",
180180
Stream: true,

example_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ func ExampleClient_CreateCompletion() {
8282
resp, err := client.CreateCompletion(
8383
context.Background(),
8484
openai.CompletionRequest{
85-
Model: openai.GPT3Ada,
85+
Model: openai.GPT3Babbage002,
8686
MaxTokens: 5,
8787
Prompt: "Lorem ipsum",
8888
},
@@ -99,7 +99,7 @@ func ExampleClient_CreateCompletionStream() {
9999
stream, err := client.CreateCompletionStream(
100100
context.Background(),
101101
openai.CompletionRequest{
102-
Model: openai.GPT3Ada,
102+
Model: openai.GPT3Babbage002,
103103
MaxTokens: 5,
104104
Prompt: "Lorem ipsum",
105105
Stream: true,

examples/completion/main.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ func main() {
1313
resp, err := client.CreateCompletion(
1414
context.Background(),
1515
openai.CompletionRequest{
16-
Model: openai.GPT3Ada,
16+
Model: openai.GPT3Babbage002,
1717
MaxTokens: 5,
1818
Prompt: "Lorem ipsum",
1919
},

stream_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ func TestCreateCompletionStreamRateLimitError(t *testing.T) {
169169
var apiErr *openai.APIError
170170
_, err := client.CreateCompletionStream(context.Background(), openai.CompletionRequest{
171171
MaxTokens: 5,
172-
Model: openai.GPT3Ada,
172+
Model: openai.GPT3Babbage002,
173173
Prompt: "Hello!",
174174
Stream: true,
175175
})

0 commit comments

Comments
 (0)