Commit 9d24119: regenerate from spec 1.0.1
Parent: 4ac4160

3 files changed: +396 −22 lines changed

3 files changed

+396
-22
lines changed

api.ts

Lines changed: 182 additions & 9 deletions
@@ -28,7 +28,7 @@ import { BASE_PATH, COLLECTION_FORMATS, RequestArgs, BaseAPI, RequiredError } fr
  */
 export interface CreateAnswerRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -64,7 +64,7 @@ export interface CreateAnswerRequest {
      */
     'file'?: string | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateAnswerRequest
      */
@@ -199,7 +199,7 @@ export interface CreateAnswerResponseSelectedDocuments {
  */
 export interface CreateClassificationRequest {
     /**
-     * ID of the engine to use for completion.
+     * ID of the engine to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -229,7 +229,7 @@ export interface CreateClassificationRequest {
      */
     'labels'?: Array<string> | null;
     /**
-     * ID of the engine to use for [Search](/docs/api-reference/searches/create).
+     * ID of the engine to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
      * @type {string}
      * @memberof CreateClassificationRequest
      */
@@ -345,6 +345,110 @@ export interface CreateClassificationResponseSelectedExamples {
      */
     'label'?: string;
 }
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequest
+ */
+export interface CreateCompletionFromModelRequest {
+    /**
+     * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
+     * @type {string | Array<string> | Array<number> | Array<any>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
+    /**
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'max_tokens'?: number | null;
+    /**
+     * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'temperature'?: number | null;
+    /**
+     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'top_p'?: number | null;
+    /**
+     * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'n'?: number | null;
+    /**
+     * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stream'?: boolean | null;
+    /**
+     * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact support@openai.com and describe your use case.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logprobs'?: number | null;
+    /**
+     * Echo back the prompt in addition to the completion
+     * @type {boolean}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'echo'?: boolean | null;
+    /**
+     * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+     * @type {string | Array<string>}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'stop'?: string | Array<string> | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'presence_penalty'?: number | null;
+    /**
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'frequency_penalty'?: number | null;
+    /**
+     * Generates `best_of` completions server-side and returns the \"best\" (the one with the lowest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+     * @type {number}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'best_of'?: number | null;
+    /**
+     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
+     * @type {object}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'logit_bias'?: object | null;
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequest
+     */
+    'model'?: string;
+}
+/**
+ *
+ * @export
+ * @interface CreateCompletionFromModelRequestAllOf
+ */
+export interface CreateCompletionFromModelRequestAllOf {
+    /**
+     * ID of the model to use for completion.
+     * @type {string}
+     * @memberof CreateCompletionFromModelRequestAllOf
+     */
+    'model'?: string;
+}
 /**
  *
  * @export
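
For orientation, a request body satisfying the new interface might look like the sketch below. The fine-tune ID `curie:ft-acme-2022-01-01` is a made-up placeholder, and the field choices simply exercise the parameters documented above:

import { CreateCompletionFromModelRequest } from 'openai';

// Hypothetical request against a fine-tuned model; the model name is a placeholder.
const exampleRequest: CreateCompletionFromModelRequest = {
    model: 'curie:ft-acme-2022-01-01',
    prompt: 'Say this is a test',
    max_tokens: 16,
    temperature: 0,                  // argmax-style sampling for a well-defined answer
    stop: ['\n'],                    // stop at the first newline
    logit_bias: { '50256': -100 },   // ban <|endoftext|>, per the JSDoc example above
};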
@@ -358,7 +462,7 @@ export interface CreateCompletionRequest {
      */
     'prompt'?: string | Array<string> | Array<number> | Array<any> | null;
     /**
-     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `davinci-codex`, which supports 4096).
+     * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. Most models have a context length of 2048 tokens (except `code-davinci-001`, which supports 4096).
      * @type {number}
      * @memberof CreateCompletionRequest
      */
@@ -1208,6 +1312,42 @@ export const OpenAIApiAxiosParamCreator = function (configuration?: Configuratio
                 options: localVarRequestOptions,
             };
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel: async (createCompletionFromModelRequest: CreateCompletionFromModelRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'createCompletionFromModelRequest' is not null or undefined
+            assertParamExists('createCompletionFromModel', 'createCompletionFromModelRequest', createCompletionFromModelRequest)
+            const localVarPath = `/completions`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+
+
+            localVarHeaderParameter['Content-Type'] = 'application/json';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = serializeDataIfNeeded(createCompletionFromModelRequest, localVarRequestOptions, configuration)
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
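
A note on the layering: this param-creator level only assembles a `RequestArgs` value (a URL path plus axios options); `OpenAIApiFp` below wraps that into a callable request function, and the factory and class layers bind an axios instance and base path. Roughly, the object handed upward for the call above would look like this (illustrative values, with `data` already serialized to JSON):

// Approximate shape produced by createCompletionFromModel above (values illustrative):
const args = {
    url: '/completions',
    options: {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        data: '{"model":"curie:ft-acme-2022-01-01","prompt":"Say this is a test"}',
    },
};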
@@ -1335,7 +1475,7 @@ export const OpenAIApiAxiosParamCreator = function (configuratio
         /**
          *
          * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -1726,6 +1866,17 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
             const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(engineId, createCompletionRequest, options);
             return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<CreateCompletionResponse>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletionFromModel(createCompletionFromModelRequest, options);
+            return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -1764,7 +1915,7 @@ export const OpenAIApiFp = function(configuration?: Configuration) {
         /**
          *
          * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
         * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -1921,6 +2072,16 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
         createCompletion(engineId: string, createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
             return localVarFp.createCompletion(engineId, createCompletionRequest, options).then((request) => request(axios, basePath));
         },
+        /**
+         *
+         * @summary Creates a completion using a fine-tuned model
+         * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: any): AxiosPromise<CreateCompletionResponse> {
+            return localVarFp.createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(axios, basePath));
+        },
         /**
          *
          * @summary Creates an embedding vector representing the input text.
@@ -1956,7 +2117,7 @@ export const OpenAIApiFactory = function (configuration?: Configuration, basePat
         /**
          *
          * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-         * @param {string} engineId The ID of the engine to use for this request
+         * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
          * @param {CreateSearchRequest} createSearchRequest
          * @param {*} [options] Override http request option.
          * @throws {RequiredError}
@@ -2111,6 +2272,18 @@ export class OpenAIApi extends BaseAPI {
         return OpenAIApiFp(this.configuration).createCompletion(engineId, createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
     }
 
+    /**
+     *
+     * @summary Creates a completion using a fine-tuned model
+     * @param {CreateCompletionFromModelRequest} createCompletionFromModelRequest
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAIApi
+     */
+    public createCompletionFromModel(createCompletionFromModelRequest: CreateCompletionFromModelRequest, options?: AxiosRequestConfig) {
+        return OpenAIApiFp(this.configuration).createCompletionFromModel(createCompletionFromModelRequest, options).then((request) => request(this.axios, this.basePath));
+    }
+
     /**
      *
      * @summary Creates an embedding vector representing the input text.
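
With all four layers in place, the new endpoint is callable directly on the public client. A minimal usage sketch, assuming an `OPENAI_API_KEY` environment variable and a placeholder fine-tune ID:

import { Configuration, OpenAIApi } from 'openai';

async function main() {
    // Standard client setup; the key is read from the environment.
    const configuration = new Configuration({ apiKey: process.env.OPENAI_API_KEY });
    const openai = new OpenAIApi(configuration);

    // 'curie:ft-acme-2022-01-01' is a placeholder; substitute a real fine-tune ID.
    const response = await openai.createCompletionFromModel({
        model: 'curie:ft-acme-2022-01-01',
        prompt: 'Say this is a test',
        max_tokens: 16,
    });
    console.log(response.data.choices?.[0]?.text);
}

main();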
@@ -2152,7 +2325,7 @@ export class OpenAIApi extends BaseAPI {
     /**
      *
      * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
-     * @param {string} engineId The ID of the engine to use for this request
+     * @param {string} engineId The ID of the engine to use for this request. You can select one of &#x60;ada&#x60;, &#x60;babbage&#x60;, &#x60;curie&#x60;, or &#x60;davinci&#x60;.
      * @param {CreateSearchRequest} createSearchRequest
      * @param {*} [options] Override http request option.
      * @throws {RequiredError}
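
Since `stream` is part of the new request type, partial results can in principle be consumed by overriding the axios response type in the per-call `options`. This is a hedged sketch: the SSE framing follows the `data: [DONE]` format described in the JSDoc above, and the stream-parsing code is an illustration, not an API this SDK ships:

import { Configuration, OpenAIApi } from 'openai';

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

async function streamCompletion() {
    const res = await openai.createCompletionFromModel(
        // Placeholder fine-tune ID again; stream: true requests server-sent events.
        { model: 'curie:ft-acme-2022-01-01', prompt: 'Say this is a test', max_tokens: 16, stream: true },
        { responseType: 'stream' },   // ask axios for a raw stream instead of parsed JSON
    );
    // At runtime res.data is a readable stream here, not a parsed CreateCompletionResponse.
    (res.data as unknown as NodeJS.ReadableStream).on('data', (chunk: Buffer) => {
        for (const line of chunk.toString('utf8').split('\n')) {
            const payload = line.replace(/^data: /, '').trim();
            if (!payload || payload === '[DONE]') continue;
            process.stdout.write(JSON.parse(payload).choices[0]?.text ?? '');
        }
    });
}

streamCompletion();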
